author     Yaroslav Halchenko <debian@onerussian.com>    2014-06-14 13:34:18 +0200
committer  Yaroslav Halchenko <debian@onerussian.com>    2014-06-14 13:34:18 +0200
commit     9f010b1561421d8b3e128e07b50de9919955f258 (patch)
tree       8b692a5cc9abe7024da3a6809f5c5dc5b7c6b7b6

nitime (0.5-1) unstable; urgency=medium

  * New release
    - does not ship sphinxext/inheritance_diagram.py any longer (Closes: #706533)
  * debian/copyright
    - extended to cover added 3rd party snippets and updated years
  * debian/watch
    - updated to use githubredir.debian.net service
  * debian/patches
    - debian/patches/up_version_info_python2.6 for compatibility with
      python2.6 (on wheezy etc)

# imported from the archive
-rw-r--r--  .gitignore  2
-rw-r--r--  .mailmap  9
-rw-r--r--  .travis.yml  33
-rw-r--r--  INSTALL  4
-rw-r--r--  LICENSE  30
-rw-r--r--  MANIFEST.in  18
-rw-r--r--  README.txt  52
-rw-r--r--  THANKS  38
-rw-r--r--  debian/blends  9
-rw-r--r--  debian/changelog  82
-rw-r--r--  debian/compat  1
-rw-r--r--  debian/control  47
-rw-r--r--  debian/copyright  124
-rw-r--r--  debian/docs  2
-rw-r--r--  debian/gbp.conf  18
-rw-r--r--  debian/patches/deb_no_sources_for_docs  12
-rw-r--r--  debian/patches/series  2
-rw-r--r--  debian/patches/up_version_info_python2.6  22
-rw-r--r--  debian/python-nitime-doc.doc-base  9
-rw-r--r--  debian/python-nitime-doc.docs  1
-rw-r--r--  debian/python-nitime-doc.links  1
-rw-r--r--  debian/python-nitime.install  1
-rwxr-xr-x  debian/rules  61
-rw-r--r--  debian/source/format  1
-rw-r--r--  debian/watch  4
-rw-r--r--  doc/Makefile  111
-rw-r--r--  doc/_static/Scipy2009Nitime.pdf  bin 0 -> 1948568 bytes
-rw-r--r--  doc/_static/nipy.css  507
-rw-r--r--  doc/_static/nitime-banner-bg.png  bin 0 -> 37013 bytes
-rw-r--r--  doc/_static/nitime-banner.svgz  bin 0 -> 160837 bytes
-rw-r--r--  doc/_static/nitime.css  22
-rw-r--r--  doc/_static/reggie2.png  bin 0 -> 9531 bytes
-rw-r--r--  doc/_templates/layout.html  85
-rw-r--r--  doc/_templates/quicklinks.html  0
-rw-r--r--  doc/api/index.rst  10
-rw-r--r--  doc/conf.py  279
-rw-r--r--  doc/devel/branch_list.png  bin 0 -> 13361 bytes
-rw-r--r--  doc/devel/branch_list_compare.png  bin 0 -> 10679 bytes
-rw-r--r--  doc/devel/configure_git.rst  123
-rw-r--r--  doc/devel/development_workflow.rst  233
-rw-r--r--  doc/devel/dot2_dot3.rst  28
-rw-r--r--  doc/devel/following_latest.rst  36
-rw-r--r--  doc/devel/forking_button.png  bin 0 -> 13092 bytes
-rw-r--r--  doc/devel/forking_hell.rst  33
-rw-r--r--  doc/devel/git_development.rst  16
-rw-r--r--  doc/devel/git_install.rst  26
-rw-r--r--  doc/devel/git_intro.rst  18
-rw-r--r--  doc/devel/git_links.inc  67
-rw-r--r--  doc/devel/git_links.txt  61
-rw-r--r--  doc/devel/git_resources.rst  57
-rw-r--r--  doc/devel/how_to_release.rst  60
-rw-r--r--  doc/devel/index.rst  18
-rw-r--r--  doc/devel/patching.rst  123
-rw-r--r--  doc/devel/pull_button.png  bin 0 -> 12893 bytes
-rw-r--r--  doc/devel/set_up_fork.rst  68
-rw-r--r--  doc/devel/usecases.rst  7
-rw-r--r--  doc/discussion/base_classes.rst  139
-rw-r--r--  doc/discussion/index.rst  14
-rw-r--r--  doc/discussion/interval_object.rst  170
-rw-r--r--  doc/discussion/multitaper_jackknife.rst  83
-rw-r--r--  doc/discussion/note_about_discussion.rst  14
-rw-r--r--  doc/discussion/time_series_access.rst  156
-rw-r--r--  doc/documentation.rst  20
-rwxr-xr-x  doc/examples/ar_est_1var.py  100
-rw-r--r--  doc/examples/ar_est_2vars.py  355
-rw-r--r--  doc/examples/ar_est_3vars.py  254
-rw-r--r--  doc/examples/ar_model_fit.py  149
-rw-r--r--  doc/examples/event_related_fmri.py  147
-rw-r--r--  doc/examples/filtering_fmri.py  436
-rw-r--r--  doc/examples/granger_fmri.py  226
-rw-r--r--  doc/examples/grasshopper.py  221
-rw-r--r--  doc/examples/mtm_baseband_power.py  178
-rw-r--r--  doc/examples/mtm_harmonic_test.py  144
-rwxr-xr-x  doc/examples/multi_taper_coh.py  352
-rw-r--r--  doc/examples/multi_taper_spectral_estimation.py  416
-rw-r--r--  doc/examples/note_about_examples.txt  18
-rw-r--r--  doc/examples/resting_state_fmri.py  366
-rw-r--r--  doc/examples/seed_analysis.py  273
-rw-r--r--  doc/examples/snr_example.py  185
-rw-r--r--  doc/index.rst  25
-rw-r--r--  doc/links_names.txt  110
-rw-r--r--  doc/news.rst  15
-rw-r--r--  doc/sphinxext/README.txt  24
-rw-r--r--  doc/sphinxext/docscrape.py  497
-rw-r--r--  doc/sphinxext/docscrape_sphinx.py  136
-rw-r--r--  doc/sphinxext/github.py  155
-rw-r--r--  doc/sphinxext/ipython_console_highlighting.py  98
-rw-r--r--  doc/sphinxext/math_dollar.py  56
-rw-r--r--  doc/sphinxext/numpydoc.py  116
-rw-r--r--  doc/sphinxext/only_directives.py  63
-rw-r--r--  doc/sphinxext/plot_directive.py  489
-rw-r--r--  doc/users/index.rst  18
-rw-r--r--  doc/users/install.rst  84
-rw-r--r--  doc/users/overview.rst  258
-rw-r--r--  doc/users/quickstart.rst  19
-rw-r--r--  doc/users/tutorial.rst  101
-rw-r--r--  doc/whatsnew/development.rst  9
-rw-r--r--  doc/whatsnew/index.rst  28
-rw-r--r--  doc/whatsnew/version0.3.rst  123
-rw-r--r--  doc/whatsnew/version0.4.rst  111
-rw-r--r--  doc/whatsnew/version0.5.rst  91
-rw-r--r--  nitime/LICENSE  30
-rw-r--r--  nitime/__init__.py  33
-rw-r--r--  nitime/_mpl_units.py  226
-rw-r--r--  nitime/_utils.pyx  64
-rw-r--r--  nitime/algorithms/__init__.py  61
-rw-r--r--  nitime/algorithms/autoregressive.py  525
-rw-r--r--  nitime/algorithms/cohere.py  1287
-rw-r--r--  nitime/algorithms/correlation.py  16
-rw-r--r--  nitime/algorithms/event_related.py  150
-rw-r--r--  nitime/algorithms/filter.py  94
-rw-r--r--  nitime/algorithms/spectral.py  959
-rw-r--r--  nitime/algorithms/tests/__init__.py  0
-rw-r--r--  nitime/algorithms/tests/test_autoregressive.py  216
-rw-r--r--  nitime/algorithms/tests/test_coherence.py  395
-rw-r--r--  nitime/algorithms/tests/test_correlation.py  14
-rw-r--r--  nitime/algorithms/tests/test_event_related.py  28
-rw-r--r--  nitime/algorithms/tests/test_spectral.py  479
-rw-r--r--  nitime/algorithms/wavelet.py  107
-rw-r--r--  nitime/analysis/__init__.py  33
-rw-r--r--  nitime/analysis/base.py  43
-rw-r--r--  nitime/analysis/coherence.py  730
-rw-r--r--  nitime/analysis/correlation.py  160
-rw-r--r--  nitime/analysis/event_related.py  388
-rw-r--r--  nitime/analysis/granger.py  216
-rw-r--r--  nitime/analysis/normalization.py  33
-rw-r--r--  nitime/analysis/snr.py  148
-rw-r--r--  nitime/analysis/spectral.py  660
-rw-r--r--  nitime/analysis/tests/__init__.py  0
-rw-r--r--  nitime/analysis/tests/test_base.py  21
-rw-r--r--  nitime/analysis/tests/test_coherence.py  210
-rw-r--r--  nitime/analysis/tests/test_correlation.py  27
-rw-r--r--  nitime/analysis/tests/test_granger.py  113
-rw-r--r--  nitime/analysis/tests/test_snr.py  25
-rw-r--r--  nitime/data/event_related_fmri.csv  3361
-rw-r--r--  nitime/data/fmri1.nii.gz  bin 0 -> 100672 bytes
-rw-r--r--  nitime/data/fmri2.nii.gz  bin 0 -> 101564 bytes
-rwxr-xr-x  nitime/data/fmri_timeseries.csv  251
-rwxr-xr-x  nitime/data/grasshopper_spike_times1.txt  945
-rwxr-xr-x  nitime/data/grasshopper_spike_times2.txt  885
-rwxr-xr-x  nitime/data/grasshopper_stimulus1.txt  200000
-rwxr-xr-x  nitime/data/grasshopper_stimulus2.txt  200000
-rw-r--r--  nitime/descriptors.py  179
-rw-r--r--  nitime/fmri/__init__.py  20
-rw-r--r--  nitime/fmri/hrf.py  105
-rw-r--r--  nitime/fmri/io.py  241
-rw-r--r--  nitime/fmri/tests/__init__.py  0
-rw-r--r--  nitime/fmri/tests/test_io.py  84
-rw-r--r--  nitime/index_utils.py  532
-rw-r--r--  nitime/lazy.py  57
-rw-r--r--  nitime/lazyimports.py  98
-rw-r--r--  nitime/six.py  585
-rw-r--r--  nitime/testlib.py  130
-rw-r--r--  nitime/tests/__init__.py  0
-rw-r--r--  nitime/tests/cxy_matlab.txt  50
-rw-r--r--  nitime/tests/dpss_matlab.txt  100
-rw-r--r--  nitime/tests/fxx_matlab.txt  273
-rw-r--r--  nitime/tests/long_dpss_matlab.npy  bin 0 -> 1334480 bytes
-rw-r--r--  nitime/tests/test_algorithms.py  185
-rw-r--r--  nitime/tests/test_analysis.py  304
-rw-r--r--  nitime/tests/test_descriptors.py  42
-rw-r--r--  nitime/tests/test_lazy.py  40
-rw-r--r--  nitime/tests/test_timeseries.py  912
-rw-r--r--  nitime/tests/test_utils.py  308
-rw-r--r--  nitime/tests/test_viz.py  45
-rw-r--r--  nitime/tests/tseries12.txt  2
-rw-r--r--  nitime/timeseries.py  1590
-rw-r--r--  nitime/utils.py  2226
-rw-r--r--  nitime/version.py  109
-rw-r--r--  nitime/viz.py  1427
-rwxr-xr-x  setup.py  63
-rw-r--r--  setup_egg.py  2
-rw-r--r--  tools/apigen.py  426
-rwxr-xr-x  tools/build_modref_templates.py  19
-rwxr-xr-x  tools/build_release  31
-rwxr-xr-x  tools/ex2rst  277
-rwxr-xr-x  tools/github_stats.py  149
-rwxr-xr-x  tools/gitwash_dumper.py  151
-rwxr-xr-x  tools/make_examples.py  98
-rwxr-xr-x  tools/release  31
-rwxr-xr-x  tools/sneeze.py  50
-rw-r--r--  tools/toollib.py  34
182 files changed, 433521 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f3d74a9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+*.pyc
+*~
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..58cfa28
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,9 @@
+Ariel Rokem <arokem@berkeley.edu> arokem <arokem@berkeley.edu>
+Ariel Rokem <arokem@gmail.com> arokem <arokem@gmail.com>
+Michael Waskom <mwaskom@mit.edu> mwaskom <mwaskom@mit.edu>
+Mike Trumpis <mtrumpis@berkeley.edu> miketrumpis <mtrumpis@gmail.com>
+Jarrod Millman <jarrod.millman@gmail.com> Jarrod Millman <millman@berkeley.edu>
+Jarrod Millman <jarrod.millman@gmail.com> jarrodmillman <millman@berkeley.edu>
+Jarrod Millman <jarrod.millman@gmail.com> jarrod <jarrod@fedora11.(none)>
+Kilian Koepsell <kilian@berkeley.edu> kilian@berkeley.edu <kilian@phasor.local>
+Kilian Koepsell <kilian@berkeley.edu> koepsell <kilian@berkeley.edu>
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..f83e536
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,33 @@
+# vim ft=yaml
+# travis-ci.org definition for nipy build
+#
+# We pretend to be erlang because we can't use the python support in
+# travis-ci; it uses virtualenvs, they do not have numpy, scipy, matplotlib,
+# and it is impractical to build them
+language: erlang
+env:
+ - PYTHON=python PYSUF=''
+ - PYTHON=python3 PYSUF='3'
+install:
+ - sudo add-apt-repository -y ppa:takluyver/python3
+ - sudo add-apt-repository -y ppa:chris-lea/cython
+ - sudo apt-get update
+ - sudo apt-get install $PYTHON-dev $PYTHON-numpy $PYTHON-scipy $PYTHON-matplotlib $PYTHON-setuptools $PYTHON-nose
+ - sudo easy_install$PYSUF nibabel networkx # Latest pypi
+ ## Cython easy_install breaks with error about refnanny.c; maybe something
+ ## to do with having a previous cython version;
+ ## http://mail.python.org/pipermail//cython-devel/2012-April/002344.html
+ ## (for now, we are using chris-lea's PPA instead of installing manually)
+ #- curl -O http://www.cython.org/release/Cython-0.18.zip
+ #- unzip Cython-0.18.zip
+ #- cd Cython-0.18
+ #- sudo python$PYSUF setup.py install
+ #- cd ..
+ # NITIME:
+ - $PYTHON setup.py build
+ - sudo $PYTHON setup.py install
+script:
+ # Change into an innocuous directory and find tests from installation
+ - mkdir for_test
+ - cd for_test
+ - nosetests$PYSUF --with-doctest `$PYTHON -c "import os; import nitime; print(os.path.dirname(nitime.__file__))"`
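Note: the last script step changes into an empty directory and asks Python where the installed nitime package lives, so nosetests exercises the installed copy rather than the source checkout. A minimal Python sketch of that lookup (the same one-liner from the config, expanded):

    import os
    import nitime

    # Directory of the *installed* nitime package; .travis.yml passes this
    # path to `nosetests --with-doctest` so the tests run against the install.
    pkg_dir = os.path.dirname(nitime.__file__)
    print(pkg_dir)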
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..087715b
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,4 @@
+For installation instructions see documentation:
+
+http://nipy.org/nitime/
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..934d189
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,30 @@
+Copyright (c) 2006-2011, NIPY Developers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NIPY Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..4911aa3
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,18 @@
+include *.txt *.py
+include THANKS
+include LICENSE
+include INSTALL
+
+graft nitime
+graft doc
+graft tools
+
+# docs subdirs we want to skip
+prune doc/_build
+prune doc/api/generated
+
+global-exclude *~
+global-exclude *.flc
+global-exclude *.pyc
+global-exclude .dircopy.log
+global-exclude .git
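Note: the graft/prune/global-exclude rules decide which files end up in a source distribution. A hedged sketch for checking the result, assuming an sdist tarball was already built at the hypothetical path dist/nitime-0.5.tar.gz:

    import fnmatch
    import tarfile

    SDIST = "dist/nitime-0.5.tar.gz"   # hypothetical path; adjust to your build

    # Patterns that MANIFEST.in prunes or globally excludes.
    unwanted = ["*~", "*.pyc", "*/doc/_build/*", "*/doc/api/generated/*"]

    with tarfile.open(SDIST) as tar:
        for member in tar.getnames():
            if any(fnmatch.fnmatch(member, pat) for pat in unwanted):
                print("unexpected file in sdist:", member)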
diff --git a/README.txt b/README.txt
new file mode 100644
index 0000000..fbe77ce
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,52 @@
+===================================================
+ Nitime: timeseries analysis for neuroscience data
+===================================================
+
+Nitime contains a core of numerical algorithms for time-series analysis both in
+the time and spectral domains, a set of container objects to represent
+time-series, and auxiliary objects that expose a high level interface to the
+numerical machinery and make common analysis tasks easy to express with compact
+and semantically clear code.
+
+Website
+=======
+
+Current information can always be found at the NIPY website,
+located here::
+
+ http://nipy.org/nitime
+
+Mailing Lists
+=============
+
+Please see the developer's list here::
+
+ http://mail.scipy.org/mailman/listinfo/nipy-devel
+
+Code
+====
+
+You can find our sources and single-click downloads:
+
+* `Main repository`_ on Github.
+* Documentation_ for all releases and current development tree.
+* Download as a tar/zip file the `current trunk`_.
+* Downloads of all `available releases`_.
+
+.. _main repository: http://github.com/nipy/nitime
+.. _Documentation: http://nipy.org/nitime
+.. _current trunk: http://github.com/nipy/nitime/archives/master
+.. _available releases: http://github.com/nipy/nitime/downloads
+
+
+License information
+===================
+
+Nitime is licensed under the terms of the new BSD license. See the file
+"LICENSE" for information on the history of this software, terms & conditions
+for usage, and a DISCLAIMER OF ALL WARRANTIES.
+
+All trademarks referenced herein are property of their respective holders.
+
+Copyright (c) 2006-2011, NIPY Developers
+All rights reserved.
diff --git a/THANKS b/THANKS
new file mode 100644
index 0000000..c4f4367
--- /dev/null
+++ b/THANKS
@@ -0,0 +1,38 @@
+NiTime is an open source project for the analysis of timeseries in Python, with
+a focus on problems in neuroscience experimental datasets. It is part of the
+NIPY project (though it can be installed separately). Many people have
+contributed to NIPY, in code development, suggestions, and financial support.
+Below is a partial list. If you've been left off, please let us know
+(nipy-devel at neuroimaging.scipy.org), and you'll be added.
+
+Tim Blanche
+Matthew Brett
+Christopher Burns
+Michael Castelle
+Philippe Ciuciu
+Dav Clark
+Yann Cointepas
+Mark D'Esposito
+Drew Fegen
+Alex Gramfort
+Yaroslav Halchenko
+Brian Hawthorne
+Paul Ivanov
+Kilian Koepsell
+Tim Leslie
+Cindee Madison
+Jarrod Millman
+Fernando Perez
+Josef Perktold
+Jean-Baptiste Poline
+Denis Riviere
+Alexis Roche
+Ariel Rokem
+Lavi Secundo
+Felice Sun
+Jonathan Taylor
+Bertrand Thirion
+Benjamin Thyreau
+Mike Trumpis
+Karl Young
+Tom Waite
diff --git a/debian/blends b/debian/blends
new file mode 100644
index 0000000..de60ff9
--- /dev/null
+++ b/debian/blends
@@ -0,0 +1,9 @@
+Format: extended
+Tasks: debian-med/imaging-dev
+Depends: python-nitime
+Language: Python
+
+Tasks: debian-med/imaging
+Recommends: python-nitime
+Why: Although listed in -dev task, it also has a strong focus on interactive
+ data analysis.
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..bdc7a97
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,82 @@
+nitime (0.5-1) unstable; urgency=medium
+
+ * New release
+ - does not ship sphinxext/inheritance_diagram.py any longer
+ (Closes: #706533)
+ * debian/copyright
+ - extended to cover added 3rd party snippets and updated years
+ * debian/watch
+ - updated to use githubredir.debian.net service
+ * debian/patches
+ - debian/patches/up_version_info_python2.6 for compatibility with
+ python2.6 (on wheezy etc)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 14 Jun 2014 07:34:18 -0400
+
+nitime (0.4-2) unstable; urgency=low
+
+ * Added graphviz to Build-Depends (Closes: #608908)
+ Sorry that I have missed in -1 upload
+ * Adding additional cleaning to assure pristine state for source
+ package rebuilding (Closes: #643226, original report was about version.py
+ being modified -- seems to be not the case any longer)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 20 Jun 2012 09:01:35 -0400
+
+nitime (0.4-1) unstable; urgency=low
+
+ * New upstream release
+ - fix commit for lazyimports pickling
+ - compatible with scipy 0.10 API breakage (Closes: #671978)
+ * debian/control:
+ - added XS-Python-Version >= 2.6 (for squeeze backports)
+ - reindented/untabified *Depends
+ - boosted policy compliance to 3.9.3 (no further changes)
+ - upcased Depends in ${python:Depends}. Hopefully finally it
+ (Closes: #614220)
+ * debian/copyright:
+ - updated years and fixed for DEP5 compliance
+ * debian/rules
+ - disable test_lazy_reload test (known to fail whenever ran by nosetest)
+ - export HOME=$(CURDIR)/build just to avoid possible FTBFs
+ * debian/watch
+ - adjusted to fetch from tags
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 06 Jun 2012 16:04:24 -0400
+
+nitime (0.3.1-1) unstable; urgency=low
+
+ * Fresh bugfix release: addresses compatibility concerns allowing easy
+ backporting
+ * CP commit to fixup __version__ to report 0.3.1 instead of 0.4.dev
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 17 Aug 2011 17:35:17 -0400
+
+nitime (0.3-1) UNRELEASED; urgency=low
+
+ * Fresh upstream release
+ * Adjusted debian/watch and added a rudimentary get-orig-source which
+ uses uscan to fetch tarballs from github
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 15 Aug 2011 16:29:48 -0400
+
+nitime (0.2.99-1) unstable; urgency=low
+
+ * Pre-0.3 snapshot release
+ * Boost policy compliance to 3.9.2 (no changes due)
+ * Assure off-screen backend (Agg) for matplotlib while building docs
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 29 May 2011 21:48:41 -0400
+
+nitime (0.2-2) unstable; urgency=low
+
+ * Proper casing in ${python:depends} (Closes: #614220).
+ Thanks Jakub Wilk for the report and for the fix
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 20 Feb 2011 09:40:41 -0500
+
+nitime (0.2-1) unstable; urgency=low
+
+ * Initial release (Closes: #600714)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 22 Oct 2010 14:32:15 -0400
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..7f8f011
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+7
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..ac60d05
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,47 @@
+Source: nitime
+Section: python
+Priority: extra
+Maintainer: NeuroDebian Team <team@neuro.debian.net>
+Uploaders: Yaroslav Halchenko <debian@onerussian.com>, Michael Hanke <michael.hanke@gmail.com>
+Build-Depends: debhelper (>= 7.0.50~), python-all, python-support,
+ python-numpy,
+ python-scipy,
+ python-matplotlib, python-tk,
+ python-sphinx,
+ python-nose,
+ python-networkx,
+ python-nibabel,
+ graphviz,
+Standards-Version: 3.9.3
+XS-Python-Version: >= 2.6
+Homepage: http://nipy.org/nitime
+Vcs-Git: git://github.com/yarikoptic/nitime.git
+Vcs-Browser: http://github.com/yarikoptic/nitime
+
+Package: python-nitime
+Architecture: all
+Depends: ${python:Depends}, ${shlibs:Depends}, ${misc:Depends},
+ python-numpy, python-scipy
+Recommends: python-matplotlib,
+ python-nose,
+ python-nibabel,
+ python-networkx
+Description: timeseries analysis for neuroscience data (nitime)
+ Nitime is a Python module for time-series analysis of data from
+ neuroscience experiments. It contains a core of numerical algorithms
+ for time-series analysis both in the time and spectral domains, a set
+ of container objects to represent time-series, and auxiliary objects
+ that expose a high level interface to the numerical machinery and
+ make common analysis tasks easy to express with compact and
+ semantically clear code.
+
+Package: python-nitime-doc
+Architecture: all
+Section: doc
+Depends: ${misc:Depends}, libjs-jquery
+Suggests: python-nitime
+Description: timeseries analysis for neuroscience data (nitime) -- documentation
+ Nitime is a Python module for time-series analysis of data from
+ neuroscience experiments.
+ .
+ This package provides the documentation in HTML format.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..824d75a
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,124 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: nitime
+Upstream-Contact: nipy-devel@neuroimaging.scipy.org
+Source: http://github.com/nipy/nitime
+
+
+Files: *
+Copyright: 2006-2014, NIPY Developers
+License: BSD-3
+
+Files: doc/sphinxext/*
+Copyright: 2007-2009, Stefan van der Walt and Sphinx team
+License: BSD-3
+
+Files: nitime/tests/decotest.py
+Copyright: 2009, The IPython Development Team
+License: BSD-3
+
+Files: nitime/_mpl_units.py
+Copyright: 2012-2013, Matplotlib Development Team
+License: PSF-Matplotlib-license
+Comment: for compatibility with older matplotlib versions
+
+Files: nitime/six.py
+Copyright: 2010-2013, Benjamin Peterson
+License: Expat
+
+Files: debian/*
+Copyright: 2010-2014, Yaroslav Halchenko <debian@onerussian.com>
+License: BSD-3
+
+
+License: BSD-3
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the NIPY Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: PSF-Matplotlib-license
+ 1. This LICENSE AGREEMENT is between the Matplotlib Development Team
+ ("MDT"), and the Individual or Organization ("Licensee") accessing and
+ otherwise using matplotlib software in source or binary form and its
+ associated documentation.
+ .
+ 2. Subject to the terms and conditions of this License Agreement, MDT
+ hereby grants Licensee a nonexclusive, royalty-free, world-wide license
+ to reproduce, analyze, test, perform and/or display publicly, prepare
+ derivative works, distribute, and otherwise use matplotlib 1.3.1
+ alone or in any derivative version, provided, however, that MDT's
+ License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
+ 2012-2013 Matplotlib Development Team; All Rights Reserved" are retained in
+ matplotlib 1.3.1 alone or in any derivative version prepared by
+ Licensee.
+ .
+ 3. In the event Licensee prepares a derivative work that is based on or
+ incorporates matplotlib 1.3.1 or any part thereof, and wants to
+ make the derivative work available to others as provided herein, then
+ Licensee hereby agrees to include in any such work a brief summary of
+ the changes made to matplotlib 1.3.1.
+ .
+ 4. MDT is making matplotlib 1.3.1 available to Licensee on an "AS
+ IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
+ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 1.3.1
+ WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+ .
+ 5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
+ 1.3.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
+ LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
+ MATPLOTLIB 1.3.1, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
+ THE POSSIBILITY THEREOF.
+ .
+ 6. This License Agreement will automatically terminate upon a material
+ breach of its terms and conditions.
+ .
+ 7. Nothing in this License Agreement shall be deemed to create any
+ relationship of agency, partnership, or joint venture between MDT and
+ Licensee. This License Agreement does not grant permission to use MDT
+ trademarks or trade name in a trademark sense to endorse or promote
+ products or services of Licensee, or any third party.
+ .
+ 8. By copying, installing or otherwise using matplotlib 1.3.1,
+ Licensee agrees to be bound by the terms and conditions of this License
+ Agreement.
+
+License: Expat
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ .
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 0000000..fa6b147
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,2 @@
+README.txt
+README.txt
diff --git a/debian/gbp.conf b/debian/gbp.conf
new file mode 100644
index 0000000..0e3ce00
--- /dev/null
+++ b/debian/gbp.conf
@@ -0,0 +1,18 @@
+[DEFAULT]
+upstream-branch = master
+debian-branch = debian
+upstream-tag = %(version)s
+debian-tag = debian/%(version)s
+
+# Options only affecting git-buildpackage
+[git-buildpackage]
+# ignore any non-gitted files
+ignore-new = True
+#upstream-branch = dfsgclean
+# uncomment this to automatically GPG sign tags
+sign-tags = True
+# use this for more svn-buildpackage-like behaviour:
+export-dir = ../build-area/
+tarball-dir = ../tarballs/
+
+
diff --git a/debian/patches/deb_no_sources_for_docs b/debian/patches/deb_no_sources_for_docs
new file mode 100644
index 0000000..d95e4a6
--- /dev/null
+++ b/debian/patches/deb_no_sources_for_docs
@@ -0,0 +1,12 @@
+--- a/doc/conf.py
++++ b/doc/conf.py
+@@ -131,6 +131,9 @@ today_fmt = '%B %d, %Y, %H:%M PDT'
+ # for source files.
+ exclude_trees = ['_build']
+
++# If true, the reST sources are included in the HTML build as _sources/<name>.
++html_copy_source = False
++
+ # The reST default role (used for this markup: `text`) to use for all documents.
+ #default_role = None
+
diff --git a/debian/patches/series b/debian/patches/series
new file mode 100644
index 0000000..2ce853c
--- /dev/null
+++ b/debian/patches/series
@@ -0,0 +1,2 @@
+deb_no_sources_for_docs
+up_version_info_python2.6
diff --git a/debian/patches/up_version_info_python2.6 b/debian/patches/up_version_info_python2.6
new file mode 100644
index 0000000..7cf84f7
--- /dev/null
+++ b/debian/patches/up_version_info_python2.6
@@ -0,0 +1,22 @@
+diff --git a/nitime/tests/test_lazy.py b/nitime/tests/test_lazy.py
+index 70a9cda..bbb874f 100644
+--- a/nitime/tests/test_lazy.py
++++ b/nitime/tests/test_lazy.py
+@@ -28,11 +28,13 @@ def test_lazy_noreload():
+ mod = l.LazyImport('sys')
+ # accessing module dictionary will trigger an import
+ len(mod.__dict__)
+- if sys.version_info.major == 2:
++ # do not use named tuple feature for Python 2.6 compatibility
++ major, minor = sys.version_info[:2]
++ if major == 2:
+ npt.assert_raises(ImportError, reload, mod)
+- elif sys.version_info.major == 3:
++ elif major == 3:
+ import imp
+- if sys.version_info.minor == 2:
++ if minor == 2:
+ npt.assert_raises(ImportError, imp.reload, mod)
+- elif sys.version_info.minor == 3:
++ elif minor == 3:
+ npt.assert_raises(TypeError, imp.reload, mod)
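Note: the patch sidesteps sys.version_info.major and sys.version_info.minor, named-tuple attributes that only exist from Python 2.7 onwards; plain tuple indexing works on 2.6 as well. A small stand-alone illustration (standard library only):

    import sys

    # Python >= 2.7 only: version_info exposes named attributes.
    #   major = sys.version_info.major
    # Works on Python 2.6 too: index version_info like a plain tuple.
    major, minor = sys.version_info[:2]

    if major == 2:
        print("running under Python 2.%d" % minor)
    else:
        print("running under Python %d.%d" % (major, minor))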
diff --git a/debian/python-nitime-doc.doc-base b/debian/python-nitime-doc.doc-base
new file mode 100644
index 0000000..e3769c6
--- /dev/null
+++ b/debian/python-nitime-doc.doc-base
@@ -0,0 +1,9 @@
+Document: nitime
+Title: NiTime Manual
+Author: NiPy Developers
+Abstract: This manual provides the Nitime user guide, examples, and developer reference.
+Section: Science/Data Analysis
+
+Format: HTML
+Index: /usr/share/doc/python-nitime-doc/html/index.html
+Files: /usr/share/doc/python-nitime-doc/html/*.html
diff --git a/debian/python-nitime-doc.docs b/debian/python-nitime-doc.docs
new file mode 100644
index 0000000..7123782
--- /dev/null
+++ b/debian/python-nitime-doc.docs
@@ -0,0 +1 @@
+doc/_build/html
diff --git a/debian/python-nitime-doc.links b/debian/python-nitime-doc.links
new file mode 100644
index 0000000..9f118e4
--- /dev/null
+++ b/debian/python-nitime-doc.links
@@ -0,0 +1 @@
+usr/share/javascript/jquery/jquery.js usr/share/doc/python-nitime-doc/html/_static/jquery.js
diff --git a/debian/python-nitime.install b/debian/python-nitime.install
new file mode 100644
index 0000000..326a444
--- /dev/null
+++ b/debian/python-nitime.install
@@ -0,0 +1 @@
+debian/tmp/usr/* usr/
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..93dce87
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,61 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+PACKAGE_NAME = python-nitime
+PACKAGE_ROOT_DIR = debian/${PACKAGE_NAME}
+INSTALL_PATH = $(CURDIR)/debian/tmp
+
+# default Python
+PYTHON=$(shell pyversions -d)
+
+# override matplotlib config directory
+export MPLCONFIGDIR=$(CURDIR)/build
+export HOME=$(CURDIR)/build
+
+%:
+ dh $@
+
+override_dh_auto_test:
+ : # Do not test just after build, lets install and then test
+
+override_dh_auto_install:
+ dh_auto_install
+
+ mkdir -p $(MPLCONFIGDIR) # just in case
+ echo "backend : Agg" >| $(MPLCONFIGDIR)/matplotlibrc
+ : # Prune duplicate LICENSE file
+ find debian/ -name LICENSE -delete
+ : # Only now lets build docs
+ifeq (,$(filter nodoc,$(DEB_BUILD_OPTIONS)))
+ export PYTHONPATH=$$(/bin/ls -d $(INSTALL_PATH)/usr/lib/$(PYTHON)/*-packages); \
+ $(MAKE) -C doc html
+ -rm doc/_build/html/_static/jquery.js
+ -rm -r doc/_build/html/_sources
+ : # objects inventory is of no use for the package
+ -rm doc/_build/html/objects.inv
+endif
+
+# All tests later on
+# cd build to prevent use of local/not-built source tree
+ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
+ cd build; \
+ for PYTHON in $(shell pyversions -r); do \
+ echo "I: Running NiTime unittests using $$PYTHON"; \
+ PYTHONPATH=$$(/bin/ls -d $(INSTALL_PATH)/usr/lib/$$PYTHON/*-packages) \
+ MPLCONFIGDIR=/tmp/ \
+ $$PYTHON /usr/bin/nosetests --exclude=test_lazy_reload nitime; \
+ done
+endif
+
+## immediately useable documentation
+## and exemplar data (they are small excerpts anyway)
+override_dh_compress:
+ dh_compress -X.py -X.html -X.css -X.jpg -X.txt -X.js -X.json -X.rtc -X.par -X.bin
+
+override_dh_clean:
+ dh_clean
+ @echo "I: Removing other generated material"
+ rm -rf build doc/_build doc/examples/fig doc/api/generated/ doc/examples/*rst
+
+get-orig-source:
+ -uscan --upstream-version 0 --rename
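Note: debian/rules points MPLCONFIGDIR at the build tree and writes "backend : Agg" into a matplotlibrc so that the documentation build and the test runs never need a display. A hedged sketch of the same effect done from Python (the /tmp/mplconfig path is illustrative, and matplotlib.use() must be called before pyplot is imported):

    import os

    cfg = "/tmp/mplconfig"             # illustrative writable config dir
    os.makedirs(cfg, exist_ok=True)
    os.environ.setdefault("MPLCONFIGDIR", cfg)

    import matplotlib
    matplotlib.use("Agg")              # off-screen backend, no X display needed
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    fig.savefig("headless.png")        # succeeds on a display-less build daemon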
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 0000000..f9099e3
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,4 @@
+version=3
+
+opts="uversionmangle=s/rc/~rc/g" \
+ http://githubredir.debian.net/github/nipy/nitime/ rel/(.+).tar.gz
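Note: the uversionmangle option rewrites upstream release-candidate versions so that they sort before the final release under Debian's version comparison, where "~" sorts lower than an empty string. The same substitution expressed in Python, for illustration:

    import re

    def mangle(upstream_version):
        # Same rule as uversionmangle=s/rc/~rc/g in debian/watch:
        # "0.5rc1" becomes "0.5~rc1", which dpkg orders *before* "0.5".
        return re.sub(r'rc', r'~rc', upstream_version)

    print(mangle("0.5rc1"))   # -> 0.5~rc1
    print(mangle("0.5"))      # -> 0.5 (unchanged)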
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..1e339e4
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,111 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html api htmlonly latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html make the HTML documentation"
+ @echo " api make API documents only"
+ @echo " latex make the LaTeX, you can set PAPER=a4 or PAPER=letter"
+ @echo " pdf make <latex> and run the PDF generation"
+ @echo " changes make an overview of all changed/added/deprecated" \
+ "items (ChangeLog)"
+ @echo " linkcheck check all external links for integrity"
+ @echo " doctest run all doctests embedded in the documentation"
+ @echo " sf_fer_perez copy html files to sourceforge (fer_perez only)"
+ @echo " sf_arokem copy html files to sourceforge (arokem only)"
+
+clean:
+ -rm -rf _build/* *~ api/generated
+ -rm -rf build/*
+ -rm examples/fig/*
+ -rm examples/*.rst
+
+htmlonly:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html
+ @echo
+ @echo "Build finished. The HTML pages are in _build/html."
+
+api:
+ python ../tools/build_modref_templates.py
+ @echo "Build API docs finished."
+
+html: rstexamples api htmlonly
+ @echo "Build HTML and API finished."
+
+html-no-exec: rstexamples-no-exec api htmlonly
+ @echo "Build HTML and API finished."
+
+latex: api
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in _build/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+pdf: latex
+ cd _build/latex && make all-pdf
+
+all: html pdf
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes
+ @echo
+ @echo "The overview file is in _build/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in _build/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in _build/doctest/output.txt."
+
+
+rstexamples:
+ ../tools/make_examples.py
+
+rstexamples-no-exec:
+ ../tools/make_examples.py --no-exec
+
+# Sourceforge doesn't appear to have a way of copying the files
+# without specifying a username. So we'll probably have one target
+# for each project admin
+#
+# Note: If several developers alternate commits, we may get permission errors
+# from rsync with the upload targets. If that happens, use the *_full targets
+# below which first wipe out the whole dir on SF and then re-upload.
+
+sf_fer_perez:
+ @echo "Copying html files to sourceforge..."
+ rsync -avH --delete -e ssh _build/html/ fer_perez,nipy@web.sourceforge.net:htdocs/nitime
+
+sf_arokem:
+ @echo "Copying html files to sourceforge..."
+ rsync -avH --delete -e ssh _build/html/ arokem,nipy@web.sourceforge.net:htdocs/nitime
+
+# Targets that force a clean and re-upload
+sf_fer_perez_clean:
+ @echo "Cleaning up sourceforge site"
+ ssh fer_perez,nipy@shell.sf.net "rm -rf /home/groups/n/ni/nipy/htdocs/nitime/*"
+
+sf_fer_perez_full: sf_fer_perez_clean sf_fer_perez
+
+sf_arokem_clean:
+ @echo "Cleaning up sourceforge site"
+ ssh arokem,nipy@shell.sf.net "rm -rf /home/groups/n/ni/nipy/htdocs/nitime/*"
+
+sf_arokem_full: sf_arokem_clean sf_arokem
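Note: the html target chains rstexamples, api, and htmlonly: example scripts are converted to reST pages, API stubs are regenerated, and sphinx-build renders the result. A sketch of driving the same sequence from Python (run from within the doc/ directory, assuming the helper scripts behave as their Makefile targets suggest):

    import subprocess

    steps = [
        ["python", "../tools/make_examples.py"],            # rstexamples
        ["python", "../tools/build_modref_templates.py"],   # api
        ["sphinx-build", "-b", "html",
         "-d", "_build/doctrees", ".", "_build/html"],      # htmlonly
    ]
    for cmd in steps:
        subprocess.check_call(cmd)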
diff --git a/doc/_static/Scipy2009Nitime.pdf b/doc/_static/Scipy2009Nitime.pdf
new file mode 100644
index 0000000..a66dcfb
--- /dev/null
+++ b/doc/_static/Scipy2009Nitime.pdf
Binary files differ
diff --git a/doc/_static/nipy.css b/doc/_static/nipy.css
new file mode 100644
index 0000000..7aa01b7
--- /dev/null
+++ b/doc/_static/nipy.css
@@ -0,0 +1,507 @@
+/**
+ * Alternate Sphinx design
+ * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
+ */
+
+body {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+ font-size: 14px;
+ letter-spacing: -0.01em;
+ line-height: 150%;
+ text-align: center;
+ /*background-color: #AFC1C4; */
+ background-color: #BFD1D4;
+ color: black;
+ padding: 0;
+ border: 1px solid #aaa;
+
+ margin: 0px 80px 0px 80px;
+ min-width: 740px;
+}
+
+a {
+ color: #CA7900;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #2491CF;
+}
+
+pre {
+ font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.015em;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ background-color: #f8f8f8;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+ border: 0;
+ background-color: transparent;
+ color: #aaa;
+}
+
+table.highlighttable {
+ margin-left: 0.5em;
+}
+
+table.highlighttable td {
+ padding: 0 0.5em 0 0.5em;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.95em;
+ letter-spacing: 0.01em;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border-bottom: 1px solid #ddd;
+ color: #333;
+}
+
+tt.descname {
+ background-color: transparent;
+ font-weight: bold;
+ font-size: 1.2em;
+ border: 0;
+}
+
+tt.descclassname {
+ background-color: transparent;
+ border: 0;
+}
+
+tt.xref {
+ background-color: transparent;
+ font-weight: bold;
+ border: 0;
+}
+
+a tt {
+ background-color: transparent;
+ font-weight: bold;
+ border: 0;
+ color: #CA7900;
+}
+
+a tt:hover {
+ color: #2491CF;
+}
+
+dl {
+ margin-bottom: 15px;
+}
+
+dd p {
+ margin-top: 0px;
+}
+
+dd ul, dd table {
+ margin-bottom: 10px;
+}
+
+dd {
+ margin-top: 3px;
+ margin-bottom: 10px;
+ margin-left: 30px;
+}
+
+.refcount {
+ color: #060;
+}
+
+dt:target,
+.highlight {
+ background-color: #fbe54e;
+}
+
+dl.class, dl.function {
+ border-top: 2px solid #888;
+}
+
+dl.method, dl.attribute {
+ border-top: 1px solid #aaa;
+}
+
+dl.glossary dt {
+ font-weight: bold;
+ font-size: 1.1em;
+}
+
+pre {
+ line-height: 120%;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+.first {
+ margin-top: 0 !important;
+}
+
+div.document {
+ background-color: white;
+ text-align: left;
+ background-image: url(contents.png);
+ background-repeat: repeat-x;
+}
+
+/*
+div.documentwrapper {
+ width: 100%;
+}
+*/
+
+div.clearer {
+ clear: both;
+}
+
+div.related h3 {
+ display: none;
+}
+
+div.related ul {
+ background-image: url(navigation.png);
+ height: 2em;
+ list-style: none;
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+ margin: 0;
+ padding-left: 10px;
+}
+
+div.related ul li {
+ margin: 0;
+ padding: 0;
+ height: 2em;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: #EE9816;
+}
+
+div.related ul li a:hover {
+ color: #3CA8E7;
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.bodywrapper {
+ margin: 0 240px 0 0;
+ border-right: 1px solid #ccc;
+}
+
+div.body a {
+ text-decoration: underline;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0.5em 15px 15px 0;
+ width: 210px;
+ float: right;
+ text-align: left;
+/* margin-left: -100%; */
+}
+
+div.sphinxsidebar h4, div.sphinxsidebar h3 {
+ margin: 1em 0 0.5em 0;
+ font-size: 0.9em;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border: 1px solid #86989B;
+ background-color: #AFC1C4;
+}
+
+div.sphinxsidebar ul {
+ padding-left: 1.5em;
+ margin-top: 7px;
+ list-style: none;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ list-style: square;
+ margin-left: 20px;
+}
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+p.rubric {
+ font-weight: bold;
+}
+
+h1 {
+ margin: 0;
+ padding: 0.7em 0 0.3em 0;
+ font-size: 1.5em;
+ color: #11557C;
+}
+
+h2 {
+ margin: 1.3em 0 0.2em 0;
+ font-size: 1.35em;
+ padding: 0;
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.2em;
+}
+
+h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
+ color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+ display: none;
+ margin: 0 0 0 0.3em;
+ padding: 0 0.2em 0 0.2em;
+ color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+ display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+ color: #777;
+ background-color: #eee;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.footer {
+ background-color: #E3EFF1;
+ color: #86989B;
+ padding: 3px 8px 3px 0;
+ clear: both;
+ font-size: 0.8em;
+ text-align: right;
+}
+
+div.footer a {
+ color: #86989B;
+ text-decoration: underline;
+}
+
+div.pagination {
+ margin-top: 2em;
+ padding-top: 0.5em;
+ border-top: 1px solid black;
+ text-align: center;
+}
+
+div.sphinxsidebar ul.toc {
+ margin: 1em 0 1em 0;
+ padding: 0 0 0 0.5em;
+ list-style: none;
+}
+
+div.sphinxsidebar ul.toc li {
+ margin: 0.5em 0 0.5em 0;
+ font-size: 0.9em;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul.toc li p {
+ margin: 0;
+ padding: 0;
+}
+
+div.sphinxsidebar ul.toc ul {
+ margin: 0.2em 0 0.2em 0;
+ padding: 0 0 0 1.8em;
+}
+
+div.sphinxsidebar ul.toc ul li {
+ padding: 0;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 0 0;
+ border: 1px solid #86989B;
+ background-color: #f7f7f7;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin: 0;
+ padding: 0.1em 0 0.1em 0.5em;
+ color: white;
+ border-bottom: 1px solid #86989B;
+ font-weight: bold;
+ background-color: #AFC1C4;
+}
+
+div.warning {
+ border: 1px solid #940000;
+}
+
+div.warning p.admonition-title {
+ background-color: #CF0000;
+ border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+div.versioninfo {
+ margin: 1em 0 0 0;
+ border: 1px solid #ccc;
+ background-color: #DDEAF0;
+ padding: 8px;
+ line-height: 1.3em;
+ font-size: 0.9em;
+}
+
+
+a.headerlink {
+ color: #c60f0f!important;
+ font-size: 1em;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none!important;
+ visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+ visibility: visible;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+table.indextable td {
+ text-align: left;
+ vertical-align: top;
+}
+
+table.indextable dl, table.indextable dd {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+table.indextable tr.pcap {
+ height: 10px;
+}
+
+table.indextable tr.cap {
+ margin-top: 10px;
+ background-color: #f2f2f2;
+}
+
+img.toggler {
+ margin-right: 3px;
+ margin-top: 3px;
+ cursor: pointer;
+}
+
+img.inheritance {
+ border: 0px
+}
+
+form.pfform {
+ margin: 10px 0 20px 0;
+}
+
+table.contentstable {
+ width: 90%;
+}
+
+table.contentstable p.biglink {
+ line-height: 150%;
+}
+
+a.biglink {
+ font-size: 1.3em;
+}
+
+span.linkdescr {
+ font-style: italic;
+ padding-top: 5px;
+ font-size: 90%;
+}
+
+ul.search {
+ margin: 10px 0 0 20px;
+ padding: 0;
+}
+
+ul.search li {
+ padding: 5px 0 5px 20px;
+ background-image: url(file.png);
+ background-repeat: no-repeat;
+ background-position: 0 7px;
+}
+
+ul.search li a {
+ font-weight: bold;
+}
+
+ul.search li div.context {
+ color: #888;
+ margin: 2px 0 0 30px;
+ text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+ font-weight: bold;
+}
diff --git a/doc/_static/nitime-banner-bg.png b/doc/_static/nitime-banner-bg.png
new file mode 100644
index 0000000..e0764eb
--- /dev/null
+++ b/doc/_static/nitime-banner-bg.png
Binary files differ
diff --git a/doc/_static/nitime-banner.svgz b/doc/_static/nitime-banner.svgz
new file mode 100644
index 0000000..be0d221
--- /dev/null
+++ b/doc/_static/nitime-banner.svgz
Binary files differ
diff --git a/doc/_static/nitime.css b/doc/_static/nitime.css
new file mode 100644
index 0000000..a1503c1
--- /dev/null
+++ b/doc/_static/nitime.css
@@ -0,0 +1,22 @@
+@import url("./sphinxdoc.css");
+
+body {
+ background-color: #E5E2E2;
+}
+
+div.sphinxsidebar h4, div.sphinxsidebar h3 {
+ background-color: #2F83C8;
+}
+
+h1 {
+ color: #2F83C8;
+}
+
+div.footer {
+ background-color: #CEC890;
+}
+
+
+div.body {
+ background-color: white;
+}
diff --git a/doc/_static/reggie2.png b/doc/_static/reggie2.png
new file mode 100644
index 0000000..1febedb
--- /dev/null
+++ b/doc/_static/reggie2.png
Binary files differ
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html
new file mode 100644
index 0000000..fdeb147
--- /dev/null
+++ b/doc/_templates/layout.html
@@ -0,0 +1,85 @@
+{% extends "!layout.html" %}
+{% set title = 'Neuroimaging in Python' %}
+
+{% block rootrellink %}
+ <li><a href="{{pathto('index')}}">Nitime Home</a> |&nbsp;</li>
+{% endblock %}
+
+
+{% block extrahead %}
+ <meta name="keywords" content="nipy, neuroimaging, python, neuroscience, time
+ series">
+{% endblock %}
+
+{% block header %}
+<div style="background-color: white; text-align: left; padding: 10px 10px 15px 15px">
+ <a href="{{pathto('index') }}">
+  <img src="{{ pathto("_static/nitime-banner-bg.png", 1) }}" alt="NIPY logo" border="0" /></a>
+</div>
+{% endblock %}
+
+{# This block gets put at the top of the sidebar #}
+{% block sidebarlogo %}
+
+
+<h4> Site Navigation </h4>
+ <ul>
+ <li><a href="{{pathto('documentation')}}">Documentation</a></li>
+ <li><a href="{{pathto('devel/index')}}">Development</a></li>
+ <li><a href="{{pathto('news')}}">News</a></li>
+ </ul>
+
+<h4> NIPY Community </h4>
+ <ul class="simple">
+ <li><a class="reference external"
+ href="http://nipy.sourceforge.net/">Community Home</a></li>
+ <li><a class="reference external"
+ href="http://nipy.sourceforge.net/software/projects/">NIPY Projects</a></li>
+ <li><a class="reference external"
+ href="http://mail.scipy.org/mailman/listinfo/nipy-devel">Mailing List</a></li>
+ <li><a class="reference external"
+ href="http://nipy.sourceforge.net/software/license/index.html">License</a></li>
+ </ul>
+
+{% endblock %}
+
+{# I had to copy the whole search block just to change the rendered text,
+ so it doesn't mention modules or classes #}
+{%- block sidebarsearch %}
+{%- if pagename != "search" %}
+
+<div id="searchbox-ml" style="display: none">
+ <h3>Search mailing list archive</h3>
+ <script type="text/javascript">
+ function mlsearch(curobj)
+ {
+ curobj.q.value="site:lists.neuroimaging.scipy.org/pipermail/nipy-devel/ "+curobj.userquery.value
+ }
+ </script>
+ <form action="http://www.google.com/search" method="get" onSubmit="mlsearch(this)">
+ <input name="userquery" size="13" type="text" /> <input type="submit" value="Go" />
+ <input name="q" type="hidden" />
+ </form>
+</div>
+
+<div id="searchbox-site" style="display: none">
+ <h3>{{ _('Search this site') }}</h3>
+ <form class="search" action="{{ pathto('search') }}" method="get">
+ <input type="text" name="q" size="13" />
+ <input type="submit" value="{{ _('Go') }}" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ <p class="searchtip" style="font-size: 90%">
+ </p>
+</div>
+<script type="text/javascript">$('#searchbox-ml').show(0);</script>
+<script type="text/javascript">$('#searchbox-site').show(0);</script>
+{%- endif %}
+
+{# The sidebarsearch block is the last one available in the default sidebar()
+ macro, so the only way to add something to the bottom of the sidebar is to
+ put it here, at the end of the sidebarsearch block (before it closes).
+ #}
+
+{%- endblock %}
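Note: the mlsearch() helper in this template prefixes the user's query with a Google "site:" restriction to the nipy-devel pipermail archive before submitting the form. The equivalent URL can be built directly; a small Python sketch:

    try:
        from urllib.parse import urlencode   # Python 3
    except ImportError:
        from urllib import urlencode         # Python 2

    def mailing_list_search_url(user_query):
        # Mirrors mlsearch() above: restrict the Google query to the
        # nipy-devel pipermail archive.
        q = ("site:lists.neuroimaging.scipy.org/pipermail/nipy-devel/ "
             + user_query)
        return "http://www.google.com/search?" + urlencode({"q": q})

    print(mailing_list_search_url("coherence"))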
diff --git a/doc/_templates/quicklinks.html b/doc/_templates/quicklinks.html
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/doc/_templates/quicklinks.html
diff --git a/doc/api/index.rst b/doc/api/index.rst
new file mode 100644
index 0000000..87a4a88
--- /dev/null
+++ b/doc/api/index.rst
@@ -0,0 +1,10 @@
+.. _api-index:
+
+#####
+ API
+#####
+
+:Release: |version|
+:Date: |today|
+
+.. include:: generated/gen.rst
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..ae01b40
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+#
+# nitime documentation build configuration file, created by
+# sphinx-quickstart on Mon Jul 20 12:30:18 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import warnings
+
+# Declare here the things that our documentation build will depend on, so
+# that if they are not present the build fails immediately rather than
+# producing possibly obscure errors later on.
+
+# Documentation dependency format: each dep is a pair of two entries, the first
+# is a string that should be a valid (possibly dotted) package name, and the
+# second a list (possibly empty) of names to import from that package.
+doc_deps = [['networkx', []],
+ ['mpl_toolkits.axes_grid', ['make_axes_locatable']],
+ ]
+
+# Analyze the dependencies, and fail if any is unmet, with a hopefully
+# reasonable error
+failed_deps = []
+for package, parts in doc_deps:
+ try:
+ __import__(package, fromlist=parts)
+ except ImportError:
+ failed_deps.append([package, parts])
+
+if failed_deps:
+ print
+ print "*** ERROR IN DOCUMENTATION BUILD ***"
+ print "The documentation build is missing these dependencies:"
+ for pak, parts in failed_deps:
+ if parts:
+ print "Package: %s, parts: %s" % (pak, parts)
+ else:
+ print "Package: %s" % pak
+
+ raise RuntimeError('Unmet dependencies for documentation build')
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.append(os.path.abspath('sphinxext'))
+
+#-----------------------------------------------------------------------------
+# Error control in examples and plots
+#-----------------------------------------------------------------------------
+# We want by default our documentation to NOT build if any plot warnings are
+# generated, so we turn PlotWarning into an error. For now this requires using
+# a patched version of the plot_directive, but we'll upstream this to matplotlib.
+import plot_directive
+# If you *really* want to disable these error checks to be able to finish a doc
+# build, comment out the next line. But please do NOT leave it uncommented in
+# a committed file, so that the official build is always in the paranoid mode
+# (where the warnings become errors).
+warnings.simplefilter('error', plot_directive.PlotWarning)
+
+# -- General configuration -----------------------------------------------------
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.doctest',
+ #'sphinx.ext.intersphinx',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.pngmath',
+ 'numpydoc',
+ 'sphinx.ext.inheritance_diagram',
+ 'ipython_console_highlighting',
+ 'only_directives',
+ 'math_dollar', # Support for $x$ math
+ # For now, we use our own patched plot directive, we'll revert
+ # back to the official one once our changes are upstream.
+ #'matplotlib.sphinxext.plot_directive',
+ 'plot_directive',
+ 'github'
+ ]
+
+# ghissue config
+github_project_url = "https://github.com/nipy/nitime"
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'nitime'
+copyright = u'2009, Neuroimaging in Python team'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# We read the version info from the source file.
+ver = {}
+execfile('../nitime/version.py', ver)
+# The short X.Y version.
+version = '%s.%s' % (ver['_version_major'], ver['_version_minor'])
+# The full version, including alpha/beta/rc tags.
+release = ver['__version__']
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+today_fmt = '%B %d, %Y, %H:%M PDT'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# If true, the reST sources are included in the HTML build as _sources/<name>.
+html_copy_source = False
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# Flag to show todo items in rendered output
+todo_include_todos = True
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'sphinxdoc'
+
+# The style sheet to use for HTML and HTML Help pages. A file of that name
+# must exist either in Sphinx' static/ path, or in one of the custom paths
+# given in html_static_path.
+html_style = 'nitime.css'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# Content template for the index page.
+html_index = 'index.html'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {'index': 'indexsidebar.html'}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {'index': 'index.html'}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'nitimedoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('documentation', 'nitime.tex', u'nitime Documentation',
+ u'Neuroimaging in Python team', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/devel/branch_list.png b/doc/devel/branch_list.png
new file mode 100644
index 0000000..1196eb7
--- /dev/null
+++ b/doc/devel/branch_list.png
Binary files differ
diff --git a/doc/devel/branch_list_compare.png b/doc/devel/branch_list_compare.png
new file mode 100644
index 0000000..336afa3
--- /dev/null
+++ b/doc/devel/branch_list_compare.png
Binary files differ
diff --git a/doc/devel/configure_git.rst b/doc/devel/configure_git.rst
new file mode 100644
index 0000000..3484b6c
--- /dev/null
+++ b/doc/devel/configure_git.rst
@@ -0,0 +1,123 @@
+.. _configure-git:
+
+===============
+ Configure git
+===============
+
+.. _git-config-basic:
+
+Overview
+========
+
+Your personal git_ configurations are saved in the ``.gitconfig`` file in
+your home directory.
+Here is an example ``.gitconfig`` file::
+
+ [user]
+ name = Your Name
+ email = you@yourdomain.example.com
+
+ [alias]
+ ci = commit -a
+ co = checkout
+ st = status -a
+ stat = status -a
+ br = branch
+ wdiff = diff --color-words
+
+ [core]
+ editor = vim
+
+ [merge]
+ summary = true
+
+You can edit this file directly or you can use the ``git config --global``
+command::
+
+ git config --global user.name "Your Name"
+ git config --global user.email you@yourdomain.example.com
+ git config --global alias.ci "commit -a"
+ git config --global alias.co checkout
+ git config --global alias.st "status -a"
+ git config --global alias.stat "status -a"
+ git config --global alias.br branch
+ git config --global alias.wdiff "diff --color-words"
+ git config --global core.editor vim
+ git config --global merge.summary true
+
+To set up on another computer, you can copy your ``~/.gitconfig`` file,
+or run the commands above.
+
+In detail
+=========
+
+user.name and user.email
+------------------------
+
+It is good practice to tell git_ who you are, for labeling any changes
+you make to the code. The simplest way to do this is from the command
+line::
+
+ git config --global user.name "Your Name"
+ git config --global user.email you@yourdomain.example.com
+
+This will write the settings into your git configuration file, which
+should now contain a user section with your name and email::
+
+ [user]
+ name = Your Name
+ email = you@yourdomain.example.com
+
+Of course you'll need to replace ``Your Name`` and ``you@yourdomain.example.com``
+with your actual name and email address.
+
+Aliases
+-------
+
+You might well benefit from some aliases to common commands.
+
+For example, you might well want to be able to shorten ``git checkout``
+to ``git co``. Or you may want to alias ``git diff --color-words``
+(which gives a nicely formatted output of the diff) to ``git wdiff``.
+
+The following ``git config --global`` commands::
+
+ git config --global alias.ci "commit -a"
+ git config --global alias.co checkout
+ git config --global alias.st "status -a"
+ git config --global alias.stat "status -a"
+ git config --global alias.br branch
+ git config --global alias.wdiff "diff --color-words"
+
+will create an ``alias`` section in your ``.gitconfig`` file with contents
+like this::
+
+ [alias]
+ ci = commit -a
+ co = checkout
+ st = status -a
+ stat = status -a
+ br = branch
+ wdiff = diff --color-words
+
+Editor
+------
+
+You may also want to make sure that your editor of choice is used ::
+
+ git config --global core.editor vim
+
+Merging
+-------
+
+To enforce summaries when doing merges (``~/.gitconfig`` file again)::
+
+ [merge]
+ summary = true
+
+Or from the command line::
+
+ git config --global merge.summary true
+
+
+.. include:: git_links.inc
diff --git a/doc/devel/development_workflow.rst b/doc/devel/development_workflow.rst
new file mode 100644
index 0000000..284abdf
--- /dev/null
+++ b/doc/devel/development_workflow.rst
@@ -0,0 +1,233 @@
+.. _development-workflow:
+
+====================
+Development workflow
+====================
+
+You already have your own forked copy of the nitime_ repository, by
+following :ref:`forking`, :ref:`set-up-fork`, and you have configured
+git_ by following :ref:`configure-git`.
+
+Workflow summary
+================
+
+* Keep your ``master`` branch clean of edits that have not been merged
+ to the main nitime_ development repo. Your ``master`` then will follow
+ the main nitime_ repository.
+* Start a new *feature branch* for each set of edits that you do.
+* If you can avoid it, try not to merge other branches into your feature
+ branch while you are working.
+* Ask for review!
+
+This way of working helps to keep your work well organized and your
+history as clear as possible.
+
+See - for example - `linux git workflow`_.
+
+Making a new feature branch
+===========================
+
+::
+
+ git branch my-new-feature
+ git checkout my-new-feature
+
+Generally, you will want to keep this also on your public github_ fork
+of nitime_. To do this, you `git push`_ this new branch up to your github_
+repo. Generally (if you followed the instructions in these pages, and
+by default), git will have a link to your github_ repo, called
+``origin``. You push up to your own repo on github_ with::
+
+ git push origin my-new-feature
+
+From now on git_ will know that ``my-new-feature`` is related to the
+``my-new-feature`` branch in the github_ repo.
+
+The editing workflow
+====================
+
+Overview
+--------
+
+::
+
+ # hack hack
+ git add my_new_file
+ git commit -am 'NF - some message'
+ git push
+
+In more detail
+--------------
+
+#. Make some changes
+#. See which files have changed with ``git status`` (see `git status`_).
+ You'll see a listing like this one::
+
+   # On branch my-new-feature
+ # Changed but not updated:
+ # (use "git add <file>..." to update what will be committed)
+ # (use "git checkout -- <file>..." to discard changes in working directory)
+ #
+ # modified: README
+ #
+ # Untracked files:
+ # (use "git add <file>..." to include in what will be committed)
+ #
+ # INSTALL
+ no changes added to commit (use "git add" and/or "git commit -a")
+
+#. Check what the actual changes are with ``git diff`` (`git diff`_).
+#. Add any new files to version control ``git add new_file_name`` (see
+ `git add`_).
+#. To commit all modified files into the local copy of your repo, do
+ ``git commit -am 'A commit message'``. Note the ``-am`` options to
+ ``commit``. The ``m`` flag just signals that you're going to type a
+ message on the command line. The ``a`` flag - you can just take on
+ faith - or see `why the -a flag?`_. See also the `git commit`_ manual
+ page.
+#. To push the changes up to your forked repo on github_, do a ``git
+   push`` (see `git push`_).
+
+Asking for code review
+======================
+
+#. Go to your repo URL - e.g. ``http://github.com/your-user-name/nitime``.
+#. Click on the *Branch list* button:
+
+ .. image:: branch_list.png
+
+#. Click on the *Compare* button for your feature branch - here ``my-new-feature``:
+
+ .. image:: branch_list_compare.png
+
+#. If asked, select the *base* and *comparison* branch names you want to
+ compare. Usually these will be ``master`` and ``my-new-feature``
+ (where that is your feature branch name).
+#. At this point you should get a nice summary of the changes. Copy the
+ URL for this, and post it to the `nitime mailing list`_, asking for
+ review. The URL will look something like:
+ ``http://github.com/your-user-name/nitime/compare/master...my-new-feature``.
+ There's an example at
+ http://github.com/matthew-brett/nipy/compare/master...find-install-data
+ See: http://github.com/blog/612-introducing-github-compare-view for
+ more detail.
+
+The generated comparison is between your feature branch
+``my-new-feature``, and the place in ``master`` from which you branched
+``my-new-feature``. In other words, you can keep updating ``master``
+without interfering with the output from the comparison. More detail?
+Note the three dots in the URL above (``master...my-new-feature``) and
+see :ref:`dot2-dot3`.
+
+Asking for your changes to be merged with the main repo
+=======================================================
+
+When you are ready to ask for the merge of your code:
+
+#. Go to the URL of your forked repo, say
+ ``http://github.com/your-user-name/nitime.git``.
+#. Click on the 'Pull request' button:
+
+ .. image:: pull_button.png
+
+ Enter a message; we suggest you select only ``nitime`` as the
+ recipient. The message will go to the `nitime mailing list`_. Please
+ feel free to add others from the list as you like.
+
+Merging from trunk
+==================
+
+This updates your code from the upstream `nitime github`_ repo.
+
+Overview
+--------
+
+::
+
+ # go to your master branch
+ git checkout master
+ # pull changes from github
+ git fetch upstream
+ # merge from upstream
+ git merge upstream/master
+
+In detail
+---------
+
+We suggest that you do this only for your ``master`` branch, and leave
+your 'feature' branches unmerged, to keep their history as clean as
+possible. This makes code review easier::
+
+ git checkout master
+
+Make sure you have done :ref:`linking-to-upstream`.
+
+Merge the upstream code into your current development by first pulling
+the upstream repo to a copy on your local machine::
+
+ git fetch upstream
+
+then merging into your current branch::
+
+ git merge upstream/master
+
+Deleting a branch on github_
+============================
+
+::
+
+ git checkout master
+ # delete branch locally
+ git branch -D my-unwanted-branch
+ # delete branch on github
+ git push origin :my-unwanted-branch
+
+(Note the colon ``:`` before ``my-unwanted-branch``. See also:
+http://github.com/guides/remove-a-remote-branch)
+
+Several people sharing a single repository
+==========================================
+
+If you want to work on some stuff with other people, where you are all
+committing into the same repository, or even the same branch, then just
+share it via github_.
+
+First fork nitime into your account, as from :ref:`forking`.
+
+Then, go to your forked repository github page, say
+``http://github.com/your-user-name/nitime``
+
+Click on the 'Admin' button, and add anyone else to the repo as a
+collaborator:
+
+ .. image:: pull_button.png
+
+Now all those people can do::
+
+    git clone git@github.com:your-user-name/nitime.git
+
+Remember that links starting with ``git@`` use the ssh protocol and are
+read-write; links starting with ``git://`` are read-only.
+
+Your collaborators can then commit directly into that repo with the
+usual::
+
+ git commit -am 'ENH - much better code'
+ git push origin master # pushes directly into your repo
+
+Exploring your repository
+=========================
+
+To see a graphical representation of the repository branches and
+commits::
+
+ gitk --all
+
+To see a linear list of commits for this branch::
+
+ git log
+
+You can also look at the `network graph visualizer`_ for your github_
+repo.
+
+.. include:: git_links.inc
diff --git a/doc/devel/dot2_dot3.rst b/doc/devel/dot2_dot3.rst
new file mode 100644
index 0000000..7759e2e
--- /dev/null
+++ b/doc/devel/dot2_dot3.rst
@@ -0,0 +1,28 @@
+.. _dot2-dot3:
+
+========================================
+ Two and three dots in difference specs
+========================================
+
+Thanks to Yarik Halchenko for this explanation.
+
+Imagine a series of commits A, B, C, D... Imagine that there are two
+branches, *topic* and *master*. You branched *topic* off *master* when
+*master* was at commit 'E'. The graph of the commits looks like this::
+
+
+ A---B---C topic
+ /
+ D---E---F---G master
+
+Then::
+
+ git diff master..topic
+
+will output the difference from G to C (i.e. with effects of F and G),
+while::
+
+ git diff master...topic
+
+would output just differences in the topic branch (i.e. only A, B, and
+C).
diff --git a/doc/devel/following_latest.rst b/doc/devel/following_latest.rst
new file mode 100644
index 0000000..904f5fc
--- /dev/null
+++ b/doc/devel/following_latest.rst
@@ -0,0 +1,36 @@
+.. _following-latest:
+
+=============================
+ Following the latest source
+=============================
+
+These are the instructions if you just want to follow the latest
+*nitime* source, but you don't need to do any development for now.
+
+The steps are:
+
+* :ref:`install-git`
+* get local copy of the git repository from github_
+* update local copy from time to time
+
+Get the local copy of the code
+==============================
+
+From the command line::
+
+ git clone git://github.com/nipy/nitime.git
+
+You now have a copy of the code tree in the new ``nitime`` directory.
+
+Updating the code
+=================
+
+From time to time you may want to pull down the latest code. Do this with::
+
+ cd nitime
+ git pull
+
+The tree in ``nitime`` will now have the latest changes from the initial
+repository.
+
+.. include:: git_links.inc
diff --git a/doc/devel/forking_button.png b/doc/devel/forking_button.png
new file mode 100644
index 0000000..d0e0413
--- /dev/null
+++ b/doc/devel/forking_button.png
Binary files differ
diff --git a/doc/devel/forking_hell.rst b/doc/devel/forking_hell.rst
new file mode 100644
index 0000000..1f4e5d5
--- /dev/null
+++ b/doc/devel/forking_hell.rst
@@ -0,0 +1,33 @@
+.. _forking:
+
+==========================================
+Making your own copy (fork) of nitime
+==========================================
+
+You need to do this only once. The instructions here are very similar
+to the instructions at http://help.github.com/forking/ - please see that
+page for more detail. We're repeating some of it here just to give the
+specifics for the nitime_ project, and to suggest some default names.
+
+Set up and configure a github_ account
+======================================
+
+If you don't have a github_ account, go to the github_ page, and make one.
+
+You then need to configure your account to allow write access - see the
+``Generating SSH keys`` help on `github help`_.
+
+Create your own forked copy of nitime_
+=========================================
+
+#. Log into your github_ account.
+#. Go to the nitime_ github home at `nitime github`_.
+#. Click on the *fork* button:
+
+ .. image:: forking_button.png
+
+ Now, after a short pause and some 'Hardcore forking action', you
+ should find yourself at the home page for your own forked copy of nitime_.
+
+.. include:: git_links.inc
+
diff --git a/doc/devel/git_development.rst b/doc/devel/git_development.rst
new file mode 100644
index 0000000..64522c6
--- /dev/null
+++ b/doc/devel/git_development.rst
@@ -0,0 +1,16 @@
+.. _git-development:
+
+=====================
+ Git for development
+=====================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ forking_hell
+ set_up_fork
+ configure_git
+ development_workflow
+
diff --git a/doc/devel/git_install.rst b/doc/devel/git_install.rst
new file mode 100644
index 0000000..422bdf6
--- /dev/null
+++ b/doc/devel/git_install.rst
@@ -0,0 +1,26 @@
+.. _install-git:
+
+=============
+ Install git
+=============
+
+Overview
+========
+
+================ =============
+Debian / Ubuntu ``sudo apt-get install git-core``
+Fedora ``sudo yum install git-core``
+Windows Download and install msysGit_
+OS X Use the git-osx-installer_
+================ =============
+
+In detail
+=========
+
+See the git_ page for the most recent information.
+
+Have a look at the github_ install help pages available from `github help`_
+
+There are good instructions here: http://book.git-scm.com/2_installing_git.html
+
+.. include:: git_links.inc
diff --git a/doc/devel/git_intro.rst b/doc/devel/git_intro.rst
new file mode 100644
index 0000000..f2ba763
--- /dev/null
+++ b/doc/devel/git_intro.rst
@@ -0,0 +1,18 @@
+==============
+ Introduction
+==============
+
+These pages describe a git_ and github_ workflow for the nitime_
+project.
+
+There are several different workflows here, for different ways of
+working with *nitime*.
+
+This is not a comprehensive git_ reference; it's just a workflow for our
+own project. It's tailored to the github_ hosting service. You may well
+find better or quicker ways of getting stuff done with git_, but these
+should get you started.
+
+For general resources for learning git_ see :ref:`git-resources`.
+
+.. include:: git_links.inc
diff --git a/doc/devel/git_links.inc b/doc/devel/git_links.inc
new file mode 100644
index 0000000..1ddb414
--- /dev/null
+++ b/doc/devel/git_links.inc
@@ -0,0 +1,67 @@
+.. This (-*- rst -*-) format file contains commonly used link targets
+ and name substitutions. It may be included in many files,
+ therefore it should only contain link targets and name
+ substitutions. Try grepping for "^\.\. _" to find plausible
+ candidates for this list.
+
+.. NOTE: reST targets are
+ __not_case_sensitive__, so only one target definition is needed for
+ nipy, NIPY, Nipy, etc...
+
+.. nitime
+.. _nitime: http://nipy.org/nitime
+.. _`nitime github`: http://github.com/nipy/nitime
+.. _`nitime mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel
+
+.. nipy
+.. _nipy: http://nipy.org/nipy
+.. _`nipy github`: http://github.com/nipy/nipy
+.. _`nipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel
+
+.. ipython
+.. _ipython: http://ipython.scipy.org
+.. _`ipython github`: http://github.com/ipython/ipython
+.. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev
+
+.. dipy
+.. _dipy: http://nipy.org/dipy
+.. _`dipy github`: http://github.com/Garyfallidis/dipy
+.. _`dipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel
+
+.. git stuff
+.. _git: http://git-scm.com/
+.. _github: http://github.com
+.. _github help: http://help.github.com
+.. _msysgit: http://code.google.com/p/msysgit/downloads/list
+.. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list
+.. _subversion: http://subversion.tigris.org/
+.. _git cheat sheet: http://github.com/guides/git-cheat-sheet
+.. _pro git book: http://progit.org/
+.. _git svn crash course: http://git-scm.com/course/svn.html
+.. _learn.github: http://learn.github.com/
+.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer
+.. _git user manual: http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+.. _git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html
+.. _git community book: http://book.git-scm.com/
+.. _git ready: http://www.gitready.com/
+.. _git casts: http://www.gitcasts.com/
+.. _Fernando's git page: http://www.fperez.org/py4science/git.html
+.. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html
+.. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/
+.. _git clone: http://www.kernel.org/pub/software/scm/git/docs/git-clone.html
+.. _git checkout: http://www.kernel.org/pub/software/scm/git/docs/git-checkout.html
+.. _git commit: http://www.kernel.org/pub/software/scm/git/docs/git-commit.html
+.. _git push: http://www.kernel.org/pub/software/scm/git/docs/git-push.html
+.. _git pull: http://www.kernel.org/pub/software/scm/git/docs/git-pull.html
+.. _git add: http://www.kernel.org/pub/software/scm/git/docs/git-add.html
+.. _git status: http://www.kernel.org/pub/software/scm/git/docs/git-status.html
+.. _git diff: http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
+.. _git log: http://www.kernel.org/pub/software/scm/git/docs/git-log.html
+.. _git branch: http://www.kernel.org/pub/software/scm/git/docs/git-branch.html
+.. _git remote: http://www.kernel.org/pub/software/scm/git/docs/git-remote.html
+.. _git config: http://www.kernel.org/pub/software/scm/git/docs/git-config.html
+.. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
+.. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
+.. _git management: http://kerneltrap.org/Linux/Git_Management
+.. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
+.. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html
diff --git a/doc/devel/git_links.txt b/doc/devel/git_links.txt
new file mode 100644
index 0000000..e58449f
--- /dev/null
+++ b/doc/devel/git_links.txt
@@ -0,0 +1,61 @@
+.. This (-*- rst -*-) format file contains commonly used link targets
+ and name substitutions. It may be included in many files,
+ therefore it should only contain link targets and name
+ substitutions. Try grepping for "^\.\. _" to find plausible
+ candidates for this list.
+
+.. NOTE: reST targets are
+ __not_case_sensitive__, so only one target definition is needed for
+ nipy, NIPY, Nipy, etc...
+
+.. _nitime: http://nipy.org/nitime
+.. _`nitime github`: http://github.com/fperez/nitime/
+.. _`nitime mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel
+
+.. nipy
+.. _nipy: http://neuroimaging.scipy.org
+.. _`nipy github`: http://github.com/nipy
+.. _`nipy mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel
+
+.. ipython
+.. _ipython: http://ipython.scipy.org
+.. _`ipython github`: http://github.com/ipython
+.. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev
+
+.. git stuff
+.. _git: http://git-scm.com/
+.. _github: http://github.com
+.. _github help: http://help.github.com
+.. _msysgit: http://code.google.com/p/msysgit/downloads/list
+.. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list
+.. _subversion: http://subversion.tigris.org/
+.. _git cheat sheet: http://github.com/guides/git-cheat-sheet
+.. _pro git book: http://progit.org/
+.. _git svn crash course: http://git-scm.com/course/svn.html
+.. _learn.github: http://learn.github.com/
+.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer
+.. _git user manual: http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+.. _git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html
+.. _git community book: http://book.git-scm.com/
+.. _git ready: http://www.gitready.com/
+.. _git casts: http://www.gitcasts.com/
+.. _Fernando's git page: http://www.fperez.org/py4science/git.html
+.. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html
+.. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/
+.. _git clone: http://www.kernel.org/pub/software/scm/git/docs/git-clone.html
+.. _git checkout: http://www.kernel.org/pub/software/scm/git/docs/git-checkout.html
+.. _git commit: http://www.kernel.org/pub/software/scm/git/docs/git-commit.html
+.. _git push: http://www.kernel.org/pub/software/scm/git/docs/git-push.html
+.. _git pull: http://www.kernel.org/pub/software/scm/git/docs/git-pull.html
+.. _git add: http://www.kernel.org/pub/software/scm/git/docs/git-add.html
+.. _git status: http://www.kernel.org/pub/software/scm/git/docs/git-status.html
+.. _git diff: http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
+.. _git log: http://www.kernel.org/pub/software/scm/git/docs/git-log.html
+.. _git branch: http://www.kernel.org/pub/software/scm/git/docs/git-branch.html
+.. _git remote: http://www.kernel.org/pub/software/scm/git/docs/git-remote.html
+.. _git config: http://www.kernel.org/pub/software/scm/git/docs/git-config.html
+.. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
+.. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
+.. _git management: http://kerneltrap.org/Linux/Git_Management
+.. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
+.. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html
diff --git a/doc/devel/git_resources.rst b/doc/devel/git_resources.rst
new file mode 100644
index 0000000..63a6c94
--- /dev/null
+++ b/doc/devel/git_resources.rst
@@ -0,0 +1,57 @@
+.. _git-resources:
+
+================
+ git_ resources
+================
+
+Tutorials and summaries
+=======================
+
+* `github help`_ has an excellent series of how-to guides.
+* `learn.github`_ has an excellent series of tutorials
+* The `pro git book`_ is a good in-depth book on git.
+* A `git cheat sheet`_ is a page giving summaries of common commands.
+* The `git user manual`_
+* The `git tutorial`_
+* The `git community book`_
+* `git ready`_ - a nice series of tutorials
+* `git casts`_ - video snippets giving git how-tos.
+* `git magic`_ - extended introduction with intermediate detail
+* Fernando Perez' git page - `Fernando's git page`_ - many links and tips
+* A good but technical page on `git concepts`_
+* The `git parable`_ is an easy read explaining the concepts behind git.
+* `git svn crash course`_: git_ for those of us used to subversion_
+
+Advanced git workflow
+=====================
+
+There are many ways of working with git_; here are some posts on the
+rules of thumb that other projects have come up with:
+
+* Linus Torvalds on `git management`_
+* Linus Torvalds on `linux git workflow`_. Summary: use the git tools
+ to make the history of your edits as clean as possible; merge from
+ upstream edits as little as possible in branches where you are doing
+ active development.
+
+Manual pages online
+===================
+
+You can get these on your own machine with (e.g.) ``git help push`` or
+(same thing) ``git push --help``, but, for convenience, here are the
+online manual pages for some common commands:
+
+* `git add`_
+* `git branch`_
+* `git checkout`_
+* `git clone`_
+* `git commit`_
+* `git config`_
+* `git diff`_
+* `git log`_
+* `git pull`_
+* `git push`_
+* `git remote`_
+* `git status`_
+
+.. include:: git_links.inc
diff --git a/doc/devel/how_to_release.rst b/doc/devel/how_to_release.rst
new file mode 100644
index 0000000..5317a89
--- /dev/null
+++ b/doc/devel/how_to_release.rst
@@ -0,0 +1,60 @@
+.. _how-to-release:
+
+=================
+Releasing Nitime
+=================
+
+This section contains notes about the process that is used to release Nitime.
+
+Most of the release process is automated by the :file:`release` script in the
+:file:`tools` directory. This is just a handy reminder for the release manager.
+
+#. Write release notes in :file:`doc/whatsnew/` for the current release. Use
+ the notes that have hopefully accumulated in
+ :file:`doc/whatsnew/development`. For writing release notes, this will
+ cleanly show who contributed as author of commits (get the previous release
+ name from the tag list with ``git tag``)::
+
+ git log --pretty=format:"* %aN" PREV_RELEASE... | sort | uniq
+
+#. Uncomment the empty ``version_extra`` assignment in the :file:`version.py`
+ file, so that the complete version-string will not have the ``dev`` suffix.
+
+#. Update the website with announcements and links to the updated files on
+ github. Remember to put a short note both on the news page of the site, and
+ the index ``What's New`` section.
+
+#. Make sure that the released version of the docs is live on the site.
+
+#. Run :file:`build_release`, which does all the file checking and building
+ that the real release script will do. This will let you do test
+ installations, check that the build procedure runs OK, etc.
+
+#. Make the test installation **from one of the release tarballs**, make sure
+ that:
+
+   - The installation is being done into a place in your `PYTHONPATH`, which
+     will override your development tree.
+
+ - The docs build in your test installation.
+
+ - The tests run and pass in your test installation.
+
+#. Run the :file:`release` script, which makes the tar.gz, eggs and Win32 .exe
+ installer. It posts them to the site and registers the release with PyPI.
+
+#. Tag the current state of the repository::
+
+ git tag -a rel/x.y -m"Releasing version x.y"
+ git push --tags origin master
+
+#. Draft a short release announcement with highlights of the release (and send
+ it off!).
+
+#. Increment the version number in the :file:`version.py` file and comment the
+ line with the additional ``version_extra``, so that you get back the ``dev``
+ tag on the version number.
+
+#. Commit this as the beginning of the development of the next version.
+
+#. Celebrate!
diff --git a/doc/devel/index.rst b/doc/devel/index.rst
new file mode 100644
index 0000000..fe02689
--- /dev/null
+++ b/doc/devel/index.rst
@@ -0,0 +1,18 @@
+.. _using-git:
+
+Working with *nitime* source code
+======================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ git_intro
+ git_install
+ following_latest
+ patching
+ git_development
+ git_resources
+ how_to_release
+
diff --git a/doc/devel/patching.rst b/doc/devel/patching.rst
new file mode 100644
index 0000000..097810c
--- /dev/null
+++ b/doc/devel/patching.rst
@@ -0,0 +1,123 @@
+================
+ Making a patch
+================
+
+You've discovered a bug or something else you want to change in nitime_ - excellent!
+
+You've worked out a way to fix it - even better!
+
+You want to tell us about it - best of all!
+
+The easiest way is to make a *patch* or set of patches. Here we explain
+how. Making a patch is the simplest and quickest, but if you're going
+to be doing anything more than simple quick things, please consider
+following the :ref:`git-development` model instead.
+
+.. _making-patches:
+
+Making patches
+==============
+
+Overview
+--------
+
+::
+
+ # tell git who you are
+ git config --global user.email you@yourdomain.example.com
+ git config --global user.name "Your Name Comes Here"
+ # get the repository if you don't have it
+ git clone git://github.com/nipy/nitime.git
+ # make a branch for your patching
+ cd nitime
+ git branch the-fix-im-thinking-of
+ git checkout the-fix-im-thinking-of
+ # hack, hack, hack
+ # Tell git about any new files you've made
+ git add somewhere/tests/test_my_bug.py
+ # commit work in progress as you go
+ git commit -am 'BF - added tests for Funny bug'
+ # hack hack, hack
+ git commit -am 'BF - added fix for Funny bug'
+ # make the patch files
+ git format-patch -M -C master
+
+Then, send the generated patch files to the `nitime mailing list`_ - where we will thank you warmly.
+
+In detail
+---------
+
+#. Tell git_ who you are so it can label the commits you've made::
+
+ git config --global user.email you@yourdomain.example.com
+ git config --global user.name "Your Name Comes Here"
+
+#. If you don't already have one, clone a copy of the nitime_ repository::
+
+ git clone git://github.com/nipy/nitime.git
+ cd nitime
+
+#. Make a 'feature branch'. This will be where you work on your bug
+ fix. It's nice and safe and leaves you with access to an unmodified
+ copy of the code in the main branch::
+
+ git branch the-fix-im-thinking-of
+ git checkout the-fix-im-thinking-of
+
+#. Do some edits, and commit them as you go::
+
+ # hack, hack, hack
+ # Tell git about any new files you've made
+ git add somewhere/tests/test_my_bug.py
+ # commit work in progress as you go
+ git commit -am 'BF - added tests for Funny bug'
+ # hack hack, hack
+ git commit -am 'BF - added fix for Funny bug'
+
+ Note the ``-am`` options to ``commit``. The ``m`` flag just signals
+ that you're going to type a message on the command line. The ``a``
+ flag - you can just take on faith - or see `why the -a flag?`_.
+
+#. When you have finished, check you have committed all your changes::
+
+ git status
+
+#. Finally, make your commits into patches. You want all the commits
+ since you branched from the ``master`` branch::
+
+ git format-patch -M -C master
+
+ You will now have several files named for the commits::
+
+ 0001-BF-added-tests-for-Funny-bug.patch
+ 0002-BF-added-fix-for-Funny-bug.patch
+
+ Send these files to the `nitime mailing list`_.
+
+When you are done, to switch back to the main copy of the code, just
+return to the ``master`` branch::
+
+ git checkout master
+
+Moving from patching to development
+===================================
+
+If you find you have done some patches, and you have one or more feature
+branches, you will probably want to switch to development mode. You can
+do this with the repository you have.
+
+Fork the nitime_ repository on github_ - :ref:`forking`. Then::
+
+ # checkout and refresh master branch from main repo
+ git checkout master
+ git pull origin master
+ # rename pointer to main repository to 'upstream'
+ git remote rename origin upstream
+ # point your repo to default read / write to your fork on github
+ git remote add origin git@github.com:your-user-name/nitime.git
+ # push up any branches you've made and want to keep
+ git push origin the-fix-im-thinking-of
+
+Then you can, if you want, follow the :ref:`development-workflow`.
+
+.. include:: git_links.inc
diff --git a/doc/devel/pull_button.png b/doc/devel/pull_button.png
new file mode 100644
index 0000000..e503168
--- /dev/null
+++ b/doc/devel/pull_button.png
Binary files differ
diff --git a/doc/devel/set_up_fork.rst b/doc/devel/set_up_fork.rst
new file mode 100644
index 0000000..f32eb78
--- /dev/null
+++ b/doc/devel/set_up_fork.rst
@@ -0,0 +1,68 @@
+.. _set-up-fork:
+
+==================
+ Set up your fork
+==================
+
+First you follow the instructions for :ref:`forking`.
+
+Overview
+========
+
+::
+
+   git clone git@github.com:your-user-name/nitime.git
+ cd nitime
+ git remote add upstream git://github.com/nipy/nitime.git
+
+In detail
+=========
+
+Clone your fork
+---------------
+
+#. Clone your fork to the local computer with ``git clone
+ git@github.com:your-user-name/nitime.git``
+#. Investigate. Change directory to your new repo: ``cd nitime``. Then
+ ``git branch -a`` to show you all branches. You'll get something
+ like::
+
+ * master
+ remotes/origin/master
+
+ This tells you that you are currently on the ``master`` branch, and
+ that you also have a ``remote`` connection to ``origin/master``.
+ What remote repository is ``remote/origin``? Try ``git remote -v`` to
+ see the URLs for the remote. They will point to your github_ fork.
+
+ Now you want to connect to the upstream `nitime github`_ repository, so
+ you can merge in changes from trunk.
+
+.. _linking-to-upstream:
+
+Linking your repository to the upstream repo
+--------------------------------------------
+
+::
+
+ cd nitime
+ git remote add upstream git://github.com/nipy/nitime.git
+
+``upstream`` here is just the arbitrary name we're using to refer to the
+main nitime_ repository at `nitime github`_.
+
+Note that we've used ``git://`` for the URL rather than ``git@``. The
+``git://`` URL is read only. This means that we can't accidentally
+(or deliberately) write to the upstream repo, and we are only going to
+use it to merge into our own code.
+
+Just for your own satisfaction, show yourself that you now have a new
+'remote', with ``git remote -v show``, giving you something like::
+
+ upstream git://github.com/nipy/nitime.git (fetch)
+ upstream git://github.com/nipy/nitime.git (push)
+ origin git@github.com:your-user-name/nitime.git (fetch)
+ origin git@github.com:your-user-name/nitime.git (push)
+
+.. include:: git_links.inc
+
diff --git a/doc/devel/usecases.rst b/doc/devel/usecases.rst
new file mode 100644
index 0000000..6027238
--- /dev/null
+++ b/doc/devel/usecases.rst
@@ -0,0 +1,7 @@
+===========
+ Use Cases
+===========
+
+Below are some use cases written to demonstrate proposed APIs during
+development. This should be converted into end user documentation
+examples prior to our next release.
diff --git a/doc/discussion/base_classes.rst b/doc/discussion/base_classes.rst
new file mode 100644
index 0000000..591f9b7
--- /dev/null
+++ b/doc/discussion/base_classes.rst
@@ -0,0 +1,139 @@
+.. _base_classes:
+
+==============
+ Base classes
+==============
+
+We have two sets of base classes. The first is used in order to represent time
+and inherits from :class:`np.ndarray`, see :ref:`time_classes`. The second
+consists of data containers, used to represent different kinds of time-series
+data, see :ref:`time_series_classes`.
+
+.. _time_classes:
+
+Time
+====
+Experimental data is usually represented with regard to *relative* time. That
+is, the time relative to the beginning of the measurement. This is in contrast
+to many other kinds of data, which are represented with regard to *absolute*
+time, (one example of this kind of time is calendaric time, which includes a
+reference to some common point, such as 0 CE, or Jan. 1st 1970). An example of
+data which benefits from representation with absolute time is the
+representation of financial time-series, which can be compared against each
+other, using the common reference and for which the concept of the work-week
+applies.
+
+However, because most often the absolute calendar time of the occurrence of
+events in an experiment is of no importance, we can disregard it. Rather, the
+comparison of the time progression of data in different experiments conducted
+in different calendar times (different days, different times in the same day)
+is more common.
+
+The underlying representation of time in :mod:`nitime` is in arrays of dtype
+:class:`int64`. This makes the representation immune to the rounding errors
+that arise from representing time with floating point numbers (see
+[Goldberg1991]_). However, it restricts the smallest time-interval that can be
+represented. In :mod:`nitime`, the smallest discrete time-points are of size
+:attr:`base_unit`, and this unit is *picoseconds*. Thus, all underlying
+representations of time are made in this unit. Since this unit is far smaller
+than needed for most practical uses, representing times directly in the base
+unit would usually produce unwieldy large numbers. In order to make the
+time-objects more manageable, time objects in :mod:`nitime` carry a
+:attr:`time_unit` and a :attr:`_conversion_factor`, which serve as a
+convenience for converting between the representation of time in the base unit
+and the appearance of time in the relevant time-unit.
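+
+A minimal sketch of this idea (the names ``base_unit_per_second`` and
+``conversion_factor`` below are illustrative, not nitime's actual attributes):
+
+.. code-block:: python
+
+   import numpy as np
+
+   # convert times given in seconds into the int64 picosecond base unit
+   base_unit_per_second = 10 ** 12
+   t_seconds = np.array([0.001, 0.002, 0.0035])
+   t_base = (t_seconds * base_unit_per_second).round().astype(np.int64)
+
+   # display the same times in milliseconds via a conversion factor
+   conversion_factor = 10 ** 9              # picoseconds per millisecond
+   t_ms = t_base / float(conversion_factor)   # -> [1.0, 2.0, 3.5] ms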
+
+The first set of base classes is a set of representations of time itself. All
+these classes inherit from :class:`np.ndarray`. As mentioned above, the dtype of
+these classes is :class:`int64` and the underlying representation is always at
+the base unit. These representations will all serve as the underlying machinery
+to index into the :class:`TimeSeries` objects with arrays of time-points. The
+additional functionality common to all of these is described in detail in
+:ref:`time_series_access`. Briefly, they will all have an :func:`at` method,
+which allows indexing with time-objects of various kind. The result of this
+indexing will be to return the time-point in the respective
+:class:`TimeSeries` which is most appropriate (see :ref:`time_series_access`
+for details). They will also all have an :func:`index_at` method, which returns
+the integer index of this time in the underlying array. Finally, they will all
+have a :func:`during` method, which will allow indexing into these objects with
+an :ref:`interval_class`. This will return the appropriate times corresponding
+to an :ref:`interval_class` and :func:`index_during`, which will return the
+array of integers corresponding to the indices of these time-points in the
+array.
+
+There are three types of Time base classes: :ref:`TimeArray`,
+:ref:`NonUniformTime`, and :ref:`UniformTime`. :ref:`time_table` captures
+the essential differences between them.
+
+.. _TimeArray:
+
+:class:`TimeArray`
+-------------------
+
+This class has the least restrictions on it: it will be a 1d array, which
+contains time-points that are not necessarily ordered. It can also contain
+several copies of the same time-point. This class will be used in order to
+represent sparsely occurring events, measured at some unspecified sampling rate
+and possibly collected from several different channels, where the data is
+sampled in order of channel and not in order of time. As in the case of the
+:class:`np.ndarray`, slicing into this kind of representation should allow a
+reshaping operation to occur, which would change the dimensions of the
+underlying array. In this case, this should allow a ragged/jagged array
+structure to emerge (see http://en.wikipedia.org/wiki/Array_data_structure for
+details). This representation of time carries, in addition to the array itself
+an attribute :attr:`time_unit`, which is the unit in which we would like to
+present the time-points (recall that the underlying representation is always in
+the base-unit).
+
+.. _NonUniformTime:
+
+:class:`NonUniformTime`
+-------------------------
+
+This class can be used in order to represent time with a varying sampling rate,
+or also represent events which occur at different times in an ordered
+series. Thus, the time-points in this representation are ordered (and
+unique?). This will be used as the time representation used in the
+:ref:`NonUniformTimeSeries` class. As in the case of the
+:class:`TimeArray`, slicing into a :class:`NonUniformTime` array should
+result in a ragged/jagged array.
+
+.. _UniformTime:
+
+:class:`UniformTime`
+--------------------
+
+This class contains ordered uniformly sampled time-points. This class has an
+explicit representation of :attr:`t_0`, :attr:`sampling_rate` and
+:attr:`sampling_interval` (the latter two implemented as
+:meth:`setattr_on_read`, which can be computed from each other). Thus, each
+element in this array can be used in order to represent the entire time
+interval $t$, such that $t_i \leq t < t_i + \delta t$, where $t_i$ is the nominal
+value held by that element of the array, and $\delta t$ is the value of
+:attr:`sampling_interval`. As in the case of the
+:ref:`NonUniformTimeSeries`, this kind of class can be reshaped in such a way
+that induces an increase in the number of dimensions (see also
+:ref:`time_table`).
+.. XXX: is 'inducing an increase in the number of dimensions" the same as jagged/ragged array?
+
+This object will contain additional attributes that are not shared by the other
+time objects. In particular, an object of :class:`UniformTime`, UT, will have
+the following:
+
+* :attr:`UT.t_0`: the first time-point in the series.
+* :attr:`UT.sampling_rate`: the sampling rate of the series.
+* :attr:`UT.sampling_interval`: the value of $\delta t$, mentioned above.
+* :attr:`UT.duration`: the total time (in dtype :class:`timedelta64`) of
+ the series.
+
+Obviously, :attr:`UT.sampling_rate` and :attr:`UT.sampling_interval`
+are redundant, but can both be useful.
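+
+A rough sketch of how these redundant attributes relate to each other (the
+class below is purely illustrative and is not the proposed implementation):
+
+.. code-block:: python
+
+   class UniformTimeSketch(object):
+       """Toy stand-in for UniformTime, assuming a time_unit of seconds."""
+       def __init__(self, t_0, sampling_interval, length):
+           self.t_0 = t_0
+           self.sampling_interval = sampling_interval
+           self.sampling_rate = 1.0 / sampling_interval
+           self.duration = length * sampling_interval
+
+   ut = UniformTimeSketch(t_0=0.0, sampling_interval=0.5, length=10)
+   # ut.sampling_rate == 2.0, ut.duration == 5.0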
+
+
+:class:`Frequency`
+------------------
+
+The :attr:`UT.sampling_rate` of :class:`UniformTime` is an object of this
+class. This is a representation of the frequency in Hz. It is derived from a
+combination of the :attr:`sampling_interval` and the :attr:`time_unit`.
+
diff --git a/doc/discussion/index.rst b/doc/discussion/index.rst
new file mode 100644
index 0000000..968fc03
--- /dev/null
+++ b/doc/discussion/index.rst
@@ -0,0 +1,14 @@
+============
+ Discussion
+============
+
+This section discusses features which are under active development and not yet
+fully implemented.
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ note_about_discussion
+ multitaper_jackknife
diff --git a/doc/discussion/interval_object.rst b/doc/discussion/interval_object.rst
new file mode 100644
index 0000000..35461ca
--- /dev/null
+++ b/doc/discussion/interval_object.rst
@@ -0,0 +1,170 @@
+.. _interval_class:
+
+=================
+ Interval object
+=================
+
+Intervals can carry particular special meaning in the analysis of
+time-series. For example, a typical case, is when two time-series are recorded
+simultaneously. One is recorded from measurement of some continuous
+physiological variable, such as fMRI BOLD (and is represented by an object of
+type :ref:`UniformTimeSeries`). The other is a series of discrete events
+occurring concurrently (and can be represented by an :ref:`EventSeries` or by a
+:ref:`NonUniformTimeSeries`). For example, button presses by the subject, or
+trials of different kinds. If we want to analyze the progression of the
+physiological time-series, locked to particular kinds of events in the
+event-series, we would need a notion of an interval of time surrounding those
+events.
+
+
+In order to do that, we propose implementing a :class:`TimeInterval`
+object.
+
+.. _interval_attributes:
+
+Attributes
+----------
+
+A :class:`TimeInterval` object can be thought of as a :class:`Slice` for
+time-series objects and therefore should at least implement all attributes and
+methods of the slice object (sub-classing of slice doesn't seem to be possible,
+see :ref:`interval_from_slice`).
+
+In particular, an object of class :class:`TimeInterval`, :attr:`TI`, has
+the attributes/functions:
+
+* :attr:`TI.t_start`: the start time of the interval.
+* :attr:`TI.t_stop`: the end time of the interval.
+* :attr:`TI.duration`: the duration of the interval.
+
+Obviously, knowledge of two of these should allow calculation of the
+third. Therefore, this should be implemented in the object with a
+:func:`setattr_on_read` decoration and the object should inherit
+:class:`ResetMixin`. Initialization of the object would
+verify that enough information exists and that the information provided is
+consistent, in the same manner that is already implemented in
+:class:`UniformTimeSeries`.
+
+* :attr:`TI.t_step`: originally, we thought that this could be abused to
+ represent a time offset, relative to the attributes :attr:`t_start` and
+ :attr:`t_stop`. That is, it can tell us where relative to these two
+ time-points some interesting even, which this interval surrounds, or this
+ interval is close to, occurs. This can be used in order to interpert how
+ time-series access is done using the :class:`TimeInterval` object. See
+ :ref:`time_series_access`. This attribute can be implemented as an optional
+ input on initialization, such that it defaults to be equal to
+ :attr:`t_start`.
+
+* :func:`TI.indices(len)`: this method returns a tuple of time points that can
+ be used for slicing. Originally, this is meant to produce a list of indices
+ of length len that can be directly used to obtain a slice of the same
+ length. However, when we use a :class:`TimeInterval` for slicing, we don't
+ know yet, how long the sliced object will be (it depends on the sampling
+ interval of the sliced object). If we just use len=0, the indices method just
+ returns a 3-tuple that still contains all necessary information and can be
+ used for slicing:
+
+.. code-block:: python
+
+   >>> TI.indices(0)
+   (TI.t_start, TI.t_stop, TI.t_step)
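+
+An illustrative sketch of the attribute consistency described above (this is
+not the proposed implementation, which would use :func:`setattr_on_read` and
+:class:`ResetMixin`; the names here are hypothetical):
+
+.. code-block:: python
+
+   class TimeIntervalSketch(object):
+       def __init__(self, t_start, t_stop=None, duration=None):
+           if t_stop is None and duration is None:
+               raise ValueError("need t_stop or duration, besides t_start")
+           self.t_start = t_start
+           self.t_stop = t_start + duration if t_stop is None else t_stop
+           self.duration = self.t_stop - t_start
+           if duration is not None and self.duration != duration:
+               raise ValueError("inconsistent t_start/t_stop/duration")
+
+   ti = TimeIntervalSketch(t_start=1.0, duration=2.5)
+   # ti.t_stop == 3.5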
+
+.. _interval_initialization:
+
+Initialization
+--------------
+
+There are various different ways to initialize a :class:`TimeInterval`:
+
+* With two time points t_start and t_stop, both of :class:`TimeArray`:
+
+.. code-block:: python
+
+ TimeInterval(t_start=t1,t_stop=t2)
+
+* With a time point :attr:`t_start` (a :class:`TimeArray`) and a duration (a
+ :class:`TimeArray`):
+
+.. code-block:: python
+
+ TimeInterval(t_start=t1,duration=t_duration)
+
+* With an optional third argument :attr:`t_step` (a :class:`TimeArray`)
+ indicating a time offset of a time point $t_0=t_{start}-t_{step}$ relative to
+ which the time inside the interval should be interpreted. The relevance of
+ this third argument will become clearer when the time interval is used to
+ slice into a time-series object (see :ref:`time_series_access`). Briefly -
+  the returned object would be a time-series object with the :attr:`t_0`
+ attribute set to be the $t_0$ described above. If not provided, this would
+ default to be equal to :attr:`t_start`:
+
+.. code-block:: python
+
+ TimeInterval(t_start=t1, t_stop=t2, t_step=delta_t)
+
+or
+
+.. code-block:: python
+
+    TimeInterval(t_start=t1, duration=delta_t1, t_step=delta_t2)
+
+Finally, we would like to allow setting the interval with floating point values,
+which will be interpreted as time points :attr:`t_start` and
+:attr:`t_stop`. This convention would be convenient, but requires that the
+initialization of the object will know what the units are. In order to make
+this possible, the interval (similar to the current implementation of the
+time-series object) will have an attribute :attr:`t_unit`, which would default
+to 's'. The initialization will then cast the values provided into the
+appropriate :class:`TimeArray` objects.
+
+.. _interval_from_slice:
+
+Implementation using a slice object
+-----------------------------------
+
+Sub-classing of the slice object doesn't seem to be possible:
+
+ >>> class myslice(slice):
+ ... pass
+ ...
+ ------------------------------------------------------------
+ Traceback (most recent call last):
+ File "<ipython console>", line 1, in <module>
+ TypeError: Error when calling the metaclass bases
+ type 'slice' is not an acceptable base type
+
+However, it seems that a (scalar) :class:`TimeInterval` can be implemented
+using a slice object, provided the time points :attr:`t_start` and
+:attr:`t_stop` and the time offset :attr:`t_step` implement an ``__index__``
+method:
+
+ >>> s = slice('t_start','t_stop','t_step')
+ >>> s.start
+ 't_start'
+ >>> s.stop
+ 't_stop'
+ >>> s.step
+ 't_step'
+ >>> s.indices(1)
+ ------------------------------------------------------------
+ Traceback (most recent call last):
+ File "<ipython console>", line 1, in <module>
+ TypeError: slice indices must be integers or None or have an __index__ method
+
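+A minimal sketch of that idea (the ``BaseUnitTime`` class below is purely
+hypothetical): once the time points expose ``__index__`` - for example by
+returning their integer value in the picosecond base unit - a plain slice
+built from them can be used for indexing directly:
+
+.. code-block:: python
+
+   class BaseUnitTime(object):
+       def __init__(self, value):
+           self.value = int(value)
+       def __index__(self):
+           return self.value
+
+   s = slice(BaseUnitTime(2), BaseUnitTime(8), BaseUnitTime(3))
+   s.indices(10)           # (2, 8, 3)
+   list(range(10))[s]      # [2, 5]
+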
+Alternatively, the :class:`TimeInterval` can be implemented as an original
+object with the default constructor as similar as possible to the constructor
+of the slice object, so that we can use slice-like operations, but still
+maintain slice algebra and such.
+
+In addition to the possibility of algebraic operations, there are other reasons
+to have the :class:`TimeInterval` be an original class that holds a slice
+object that can be returned by the method :func:`TI.asslice()`.
+
+.. _interval_arrays:
+
+Interval arrays
+---------------
+
+In addition to scalar :class:`TimeInterval` objects, it also makes sense to
+define arrays of :class:`TimeInterval` objects. These arrays can be implemented
+as :class:`np.ndarray`, with an :class:`object` dtype.
+
diff --git a/doc/discussion/multitaper_jackknife.rst b/doc/discussion/multitaper_jackknife.rst
new file mode 100644
index 0000000..212596e
--- /dev/null
+++ b/doc/discussion/multitaper_jackknife.rst
@@ -0,0 +1,83 @@
+=======================================
+Jack-knifing a Multitaper SDF estimator
+=======================================
+
+Assume there is a parameter :math:`\theta` that parameterizes a distribution, and that the set of random variables :math:`\lbrace Y_1, Y_2, ..., Y_n \rbrace` are i.i.d. according to that distribution.
+
+The basic jackknifed estimator :math:`\tilde{\theta}` of some parameter :math:`\theta` is found through forming *pseudovalues* :math:`\hat{\theta}_i` based on the original set of samples. With *n* samples, there are *n* pseudovalues based on *n* "leave-one-out" sample sets.
+
+General JN definitions
+----------------------
+
+| **simple sample estimate**
+| :math:`\hat{\theta} = \dfrac{1}{n}\sum_i Y_i`
+| **leave-one-out measurement**
+| :math:`\hat{\theta}_{-i} = \dfrac{1}{n-1}\sum_{k \neq i}Y_k`
+| **pseudovalues**
+| :math:`\hat{\theta}_i = n\hat{\theta} - (n-1)\hat{\theta}_{-i}`
+
+Now the jackknifed esimator is computed as
+
+:math:`\tilde{\theta} = \dfrac{1}{n}\sum_i \hat{\theta}_i = n\hat{\theta} - \dfrac{n-1}{n}\sum_i \hat{\theta}_{-i}`
+
+This estimator is known (?) to be distributed about the true parameter :math:`\theta` approximately as a Student's t distribution, with standard error defined as
+
+:math:`s^{2} = \dfrac{n-1}{n}\sum_i \left(\hat{\theta}_i - \tilde{\theta}\right)^{2}`
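+
+A small sketch of the pseudovalue construction above, for the simple case
+where :math:`\theta` is the sample mean (the names are illustrative only, not
+nitime API):
+
+.. code-block:: python
+
+   import numpy as np
+
+   def jackknife_pseudovalues(y):
+       y = np.asarray(y, dtype=float)
+       n = y.size
+       theta_hat = y.mean()                  # simple sample estimate
+       theta_loo = (y.sum() - y) / (n - 1)   # leave-one-out estimates
+       return n * theta_hat - (n - 1) * theta_loo
+
+   y = np.random.randn(20)
+   theta_tilde = jackknife_pseudovalues(y).mean()   # jackknifed estimator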
+
+General Multitaper definition
+-----------------------------
+
+The general multitaper spectral density function (sdf) estimator, using *n* orthonormal tapers, combines the *n* :math:`\lbrace \hat{S}_i^{mt} \rbrace` sdf estimators, and takes the form
+
+:math:`\hat{S}^{mt}(f) = \dfrac{\sum_{k} w_k(f)^2S^{mt}_k(f)}{\sum_{k} |w_k(f)|^2} = \dfrac{\sum_{k} w_k(f)^2S^{mt}_k(f)}{\lVert \vec{w}(f) \rVert^2}`
+
+For instance, using discrete prolate spheroidal sequences (DPSS) windows, the :math:`\lbrace w_i \rbrace` weights, in their simplest form, are the eigenvalues of the spectral concentration system.
+
+A natural choice for a *leave-one-out* measurement is (leaving out the dependence on argument *f*)
+
+:math:`\ln\hat{S}_{-i}^{mt} = \ln\dfrac{\sum_{k \neq i} w_k^2S^{mt}_k}{\lVert \vec{w}_{-i} \rVert^2} = \ln\sum_{k \neq i} w_k^2S^{mt}_k - \ln\lVert \vec{w}_{-i} \rVert^2`
+
+where :math:`\vec{w}_{-i}` is the vector of weights with the *ith* element set to zero. The natural log has been taken so that the estimate is distributed below and above :math:`S(f)` more evenly.
+
+Multitaper Pseudovalues
+-----------------------
+
+I'm not quite clear on the form of the pseudovalues for multitaper combinations.
+
+One Option
+``````````
+
+The simple option is to weight the different *leave-one-out* measurements equally, which leads to
+
+:math:`\ln\hat{S}_{i}^{mt} = n\ln\hat{S}^{mt} - (n-1)\ln\hat{S}_{-i}^{mt}`
+
+And of course the estimate of :math:`S(f)` is given by
+
+:math:`\ln\tilde{S}^{mt} (f) = \dfrac{1}{n}\sum_i \ln\hat{S}_i^{mt}(f)`
+
+Another Option
+``````````````
+
+Another approach seems obvious which weights the *leave-one-out* measurements according to the length of :math:`\vec{w}_{-i}`. It would look something like this
+
+| let
+| :math:`g = {\lVert \vec{w} \rVert^2}`
+| :math:`g_i = {\lVert \vec{w}_{-i} \rVert^2}`
+
+Then the pseudovalues are
+
+:math:`\ln\hat{S}_i^{mt} = \left(\ln\hat{S}^{mt} + \ln g\right) - \left(\ln\hat{S}_{-i}^{mt} + \ln g_i\right)`
+
+and the jackknifed estimator is
+
+:math:`\ln\tilde{S}^{mt} = \sum_i \ln\hat{S}_i^{mt} - \ln g`
+
+and I would wager, the standard error is estimated as
+
+:math:`s^2 = \dfrac{1}{n}\sum_i \left(\ln\hat{S}_i^{mt} - \ln\tilde{S}^{mt}\right)^2`
+
+Consensus in Literature (??)
+````````````````````````````
+
+From what I can tell from a couple of sources from Thompson [#f1]_, [#f2]_, this is the approach for estimating the variance.
+
diff --git a/doc/discussion/note_about_discussion.rst b/doc/discussion/note_about_discussion.rst
new file mode 100644
index 0000000..0a860e7
--- /dev/null
+++ b/doc/discussion/note_about_discussion.rst
@@ -0,0 +1,14 @@
+===================================
+A note about the discussion section
+===================================
+
+This section of the documentation is meant as an area for discussion of the
+implementation of various aspects of nitime, including the API and the
+analysis methods.
+
+However, it does not necessarily reflect the current state of the
+software. Some things may be implemented differently than described here, or
+not implemented at all.
+
+For documentation oriented at actual usage of the software, please refer to
+:ref:`users-guide` and :ref:`examples`.
+
diff --git a/doc/discussion/time_series_access.rst b/doc/discussion/time_series_access.rst
new file mode 100644
index 0000000..a1df225
--- /dev/null
+++ b/doc/discussion/time_series_access.rst
@@ -0,0 +1,156 @@
+.. _time_series_access:
+
+====================
+ Time-series access
+====================
+
+Since one dimension of time-series data is associated with time, there is a
+natural way to index into time-series data using time objects as indices. For
+the data classes (:ref:`time_series_classes`), an indexing operation performed
+with a :class:`TimeArray` (whether it holds a single time-point or several)
+should result in returning the data at those time-points; when a single
+time-point is used, this means removal of the time-dimension from the data.
+
+The base-classes representing time (:ref:`time_classes`) serve as natural
+intermediaries in this process, by providing the integer index of a particular
+time-point (or returning an array of integers, as the case may be, see
+below). Therefore, these classes should include a method which performs this
+conversion, :func:`index_at`. This function should accept a :class:`TimeArray`
+as its parameter and return the integers corresponding to the locations of
+these time-points in the different types of time classes.
+
+Access into Time classes
+------------------------
+
+:class:`EventArray`
+~~~~~~~~~~~~~~~~~~~
+
+:func:`ev.index_at` returns the indices of the values in the array that are
+*closest* to t. That is, it returns the i for which $|t-t_i|$ is minimal.
+
+Potentially, an optional 'tolerance' argument can be implemented, specifying a
+maximal time difference between the index time and the returned time.
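+
+A sketch of this behavior (an illustration only; the eventual method may
+differ):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def index_at(ev_times, t, tolerance=None):
+        # index of the event time closest to t:
+        i = np.argmin(np.abs(ev_times - t))
+        if tolerance is not None and np.abs(ev_times[i] - t) > tolerance:
+            return None
+        return i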
+
+
+:class:`NonUniformTime`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+As above, :func:`nut.index_at` also returns the indices in the array that
+are closest to t. Since :class:`NonUniformTime` is ordered, this should give
+you either the index below or the index above the time-point you provide as
+input, depending on which of the intervals ($|t-t_i|$ or $|t-t_{i+1}|$) is
+smaller.
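+
+Because the array is ordered, this can be sketched as a binary search followed
+by a comparison of the two neighboring intervals (an illustration only):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def index_at(times, t):
+        j = np.searchsorted(times, t)  # first index with times[j] >= t
+        if j == 0:
+            return 0
+        if j == len(times):
+            return len(times) - 1
+        # pick whichever neighbor is closer to t:
+        return j if (times[j] - t) < (t - times[j - 1]) else j - 1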
+
+
+:class:`UniformTime`
+~~~~~~~~~~~~~~~~~~~~
+
+:func:`ut.index_at` returns, for each input value t, the index of the largest
+time value in the array that is still smaller than t. That is, it returns the i
+for which $t_i$ is maximal while still fulfilling $t_i<t$.
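+
+For uniformly sampled time no search is needed; the index follows directly from
+the sampling interval (a sketch, ignoring the edge cases raised in the
+questions below):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def index_at(t, t0, dt):
+        # the largest i such that t0 + i * dt < t:
+        return int(np.ceil((t - t0) / dt)) - 1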
+
+Questions
+~~~~~~~~~
+The following questions apply to all three cases:
+
+* What happens when t is smaller than the smallest entry in the array? Return
+  None?
+* What happens when t is larger than the last entry in the time array? Return
+  None?
+
+:func:`at`
+~~~~~~~~~~
+
+This function extracts the value of the time array that corresponds to the
+output of :func:`index_at` for an input t.
+
+That is, for an instance :class:`T` of one of the time classes, this function
+will return:
+
+.. code-block:: python
+
+ T.time[T.index_at(t)]
+
+
+Indexing into data time-series objects
+--------------------------------------
+
+Indexing with time
+~~~~~~~~~~~~~~~~~~
+
+The above function :func:`index_at` serves as the basis for the
+implementation of the function :func:`at` for the time-series data objects.
+This function returns the part of the data in :class:`UniformTimeSeries.data`
+(or the equivalent data structure in :class:`EventSeries` and
+:class:`NonUniformTimeSeries`) that corresponds to the times provided.
+
+Importantly, the result of indexing into a time-series data object using a time
+object is always either an instance of the same time-series data class or an
+instance of a vanilla nd-array. The latter case only occurs when a single
+time point is used to index into the time-series data and is analogous to
+indexing with a single integer into an nd-array. Conversion between different
+time-series classes can occur if the indexing time-points are non-uniform (for
+conversion between :class:`UniformTimeSeries` and
+:class:`NonUniformTimeSeries`) or if the time-points are not ordered (for
+conversion from :class:`UniformTimeSeries` or from
+:class:`NonUniformTimeSeries` to :class:`EventSeries`).
+
+Currently, the plan is to implement the indexing operation using the method
+:func:`at` and only later to map the method :meth:`ts.__getitem__` to the
+function :func:`ts.at`. For now, we note that using the function :func:`ts.at`
+directly is more flexible, since it allows the use of additional keyword
+arguments; it is therefore still unclear what to set as the default behavior
+for :func:`at` when it is executed through :meth:`__getitem__`.
+
+The function :func:`during` will receive as input a :class:`TimeInterval`
+object and will return the data corresponding to the interval, while dealing
+appropriately with the :attr:`TI.t_step` (see :ref:`interval_class` for
+details). How is this done? For an object of class :class:`UniformTimeSeries`,
+access using intervals will give back a uniform time-series object whose time
+spans the length :attr:`TI.t_stop` - :attr:`TI.t_start` and whose
+:attr:`TS.t0` is offset by the :class:`TimeInterval`'s
+:attr:`TI.t_step`.
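+
+A rough sketch of what this might look like for a uniform time-series (an
+illustration only: the names and the exact handling of :attr:`TI.t_step` are
+assumptions, and :func:`slice_at` is the method discussed further below):
+
+.. code-block:: python
+
+    def during(ts, TI):
+        sl = ts.slice_at(TI)  # integer slice covering the interval
+        # one possible reading of the offset described above:
+        new_t0 = TI.t_start + TI.t_step
+        return UniformTimeSeries(data=ts.data[..., sl], t0=new_t0,
+                                 sampling_interval=ts.sampling_interval)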
+
+Indexing with integers
+~~~~~~~~~~~~~~~~~~~~~~
+
+In parallel to the access with time-points, described above, we would like to
+implement indexing the time-series classes directly using integer indices and
+ordinary slices (with integer start, stop, and step). This should have the same
+effect as indexing the underlying nd-array using the same indices and slices,
+such that:
+
+.. code-block:: python
+
+ T.at(T.time.index_at(i)) = T[i] = T.data[...,i]
+ T.time.at(i) = T.time[i] = T.time.asarray()[i]
+
+Making the above code more compact would be another reason to implement the
+time dimension as the first dimension (not the last; see
+:ref:`time_series_classes`): this would allow rewriting the above as:
+
+.. code-block:: python
+
+ T.at(i) = T[i] = T.data[i]
+
+
+Every time-series data (and time) object would also implement a method
+:func:`T.slice_at` that, given a :class:`TimeInterval` object TI (see
+:ref:`interval_class`), returns an integer slice slice(i, j) suitable for
+indexing both into the nd-array :attr:`T.data` and into
+:attr:`T.time`:
+
+.. code-block:: python
+
+    T.slice_at(TI) = slice(T.time.index_at(TI.t_start),
+                           T.time.index_at(TI.t_stop))
+
+    data_slice = T.data[..., T.slice_at(TI)]
+    time_slice = T.time[T.slice_at(TI)]
+
+
+
+
+
diff --git a/doc/documentation.rst b/doc/documentation.rst
new file mode 100644
index 0000000..7a425f8
--- /dev/null
+++ b/doc/documentation.rst
@@ -0,0 +1,20 @@
+======================
+ Nitime Documentation
+======================
+
+.. htmlonly::
+
+ :Release: |version|
+ :Date: |today|
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ users/index
+ whatsnew/index
+ examples/index
+ discussion/index
+ devel/index
+ api/index
diff --git a/doc/examples/ar_est_1var.py b/doc/examples/ar_est_1var.py
new file mode 100755
index 0000000..29ce8cd
--- /dev/null
+++ b/doc/examples/ar_est_1var.py
@@ -0,0 +1,100 @@
+"""
+
+.. _ar:
+
+===============================================
+Fitting an AR model: algorithm module interface
+===============================================
+
+Auto-regressive (AR) processes are processes that follow the following
+equation:
+
+.. math::
+
+ x_t = \sum_{i=1}^{n}a_i * x_{t-i} + \epsilon_t
+
+In this example, we will demonstrate the estimation of the AR model
+coefficients and the estimation of the AR process spectrum, based on the
+estimation of the coefficients.
+
+We start with imports from numpy, matplotlib and import :mod:`nitime.utils` as
+well as :mod:`nitime.algorithms`:
+
+"""
+
+import numpy as np
+from matplotlib import pyplot as plt
+
+from nitime import utils
+from nitime import algorithms as alg
+from nitime.timeseries import TimeSeries
+from nitime.viz import plot_tseries
+
+"""
+
+We define some variables, which will be used in generating the AR process:
+
+"""
+
+npts = 2048
+sigma = 0.1
+drop_transients = 128
+Fs = 1000
+
+"""
+
+In this case, we generate an order 2 AR process, with the following coefficients:
+
+
+"""
+
+coefs = np.array([0.9, -0.5])
+
+"""
+
+This generates the AR(2) time series:
+
+"""
+
+X, noise, _ = utils.ar_generator(npts, sigma, coefs, drop_transients)
+
+ts_x = TimeSeries(X, sampling_rate=Fs, time_unit='s')
+ts_noise = TimeSeries(noise, sampling_rate=1000, time_unit='s')
+
+"""
+
+We use the plot_tseries function in order to visualize the process:
+
+"""
+
+fig01 = plot_tseries(ts_x, label='AR signal')
+fig01 = plot_tseries(ts_noise, fig=fig01, label='Noise')
+fig01.axes[0].legend()
+
+"""
+
+.. image:: fig/ar_est_1var_01.png
+
+
+Now we estimate the model parameters back from the data, using the Yule-Walker
+method, and generate a new realization of the process from the estimated
+model:
+
+
+"""
+
+coefs_est, sigma_est = alg.AR_est_YW(X, 2)
+# no rigorous purpose behind 100 transients
+X_hat, _, _ = utils.ar_generator(
+ N=npts, sigma=sigma_est, coefs=coefs_est, drop_transients=100, v=noise
+ )
+fig02 = plt.figure()
+ax = fig02.add_subplot(111)
+ax.plot(np.arange(100, len(X_hat) + 100), X_hat, label='estimated process')
+ax.plot(X, 'g--', label='original process')
+ax.legend()
+err = X_hat - X[100:]
+mse = np.dot(err, err) / len(X_hat)
+ax.set_title('Mean Square Error: %1.3e' % mse)
+
+
+plt.show()
diff --git a/doc/examples/ar_est_2vars.py b/doc/examples/ar_est_2vars.py
new file mode 100644
index 0000000..334bcac
--- /dev/null
+++ b/doc/examples/ar_est_2vars.py
@@ -0,0 +1,355 @@
+
+"""
+
+.. _mar:
+
+=====================================
+Multivariate auto-regressive modeling
+=====================================
+
+Multivariate auto-regressive modeling uses a simple extension of the univariate
+auto-regressive model: several time-series are modeled jointly, with each one
+depending on the past of all of them.
+
+This example is based on Ding, Chen and Bressler 2006 [Ding2006]_.
+
+
+We start by importing the required libraries:
+
+
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+"""
+
+From nitime, we import the algorithms and the utils:
+
+"""
+
+import nitime.algorithms as alg
+import nitime.utils as utils
+
+
+"""
+
+Setting the random seed assures that we always get the same 'random' answer:
+
+"""
+
+np.random.seed(1981)
+
+"""
+
+We will generate an AR(2) model, with the following coefficients (taken from
+[Ding2006]_, eq. 55):
+
+.. math::
+
+ x_t = 0.9x_{t-1} - 0.5 x_{t-2} + \epsilon_t
+
+.. math::
+
+ y_t = 0.8y_{t-1} - 0.5 y_{t-2} + 0.16 x_{t-1} - 0.2 x_{t-2} + \eta_t
+
+Or more succinctly, if we define:
+
+.. math::
+
+ Z_{t}=\left(\begin{array}{c}
+ x_{t}\\
+ y_{t}\end{array}\right),\,E_t=\left(\begin{array}{c}
+ \epsilon_{t}\\
+ \eta_{t}\end{array}\right)
+
+then:
+
+.. math::
+
+ Z_t = A_1 Z_{t-1} + A_2 Z_{t-2} + E_t
+
+where:
+
+.. math::
+
+ E_t \sim {\cal N} (\mu,\Sigma) \mathrm{, where} \,\, \Sigma=\left(\begin{array}{cc}var_{\epsilon} & cov_{xy}\\ cov_{xy} & var_{\eta}\end{array}\right)
+
+
+We now build the two :math:`A_i` matrices with the values indicated above:
+
+"""
+
+a1 = np.array([[0.9, 0],
+ [0.16, 0.8]])
+
+a2 = np.array([[-0.5, 0],
+ [-0.2, -0.5]])
+
+
+"""
+
+For implementation reasons, we rewrite the equation (:ref:`eqn_ar`) as follows:
+
+.. math::
+
+ Z_t + \sum_{i=1}^2 a_i Z_{t-i} = E_t
+
+where: $a_i = - A_i$:
+
+"""
+
+am = np.array([-a1, -a2])
+
+
+"""
+
+
+The variances and covariance of the processes are known (provided as part of
+the example in [Ding2006]_, after eq. 55):
+
+
+"""
+
+x_var = 1
+y_var = 0.7
+xy_cov = 0.4
+cov = np.array([[x_var, xy_cov],
+ [xy_cov, y_var]])
+
+
+"""
+
+We can calculate the spectral matrix analytically, based on the known
+coefficients, for 1024 frequency bins:
+
+"""
+
+n_freqs = 1024
+
+w, Hw = alg.transfer_function_xy(am, n_freqs=n_freqs)
+Sw_true = alg.spectral_matrix_xy(Hw, cov)
+
+"""
+
+Next, we will generate 500 realizations of these processes, each 1024
+time-points long, to analyze:
+
+
+"""
+
+#Number of realizations of the process
+N = 500
+#Length of each realization:
+L = 1024
+
+order = am.shape[0]
+n_lags = order + 1
+
+n_process = am.shape[-1]
+
+z = np.empty((N, n_process, L))
+nz = np.empty((N, n_process, L))
+
+for i in xrange(N):
+ z[i], nz[i] = utils.generate_mar(am, cov, L)
+
+"""
+
+We can estimate the 2nd order AR coefficients, by averaging together N
+estimates of auto-covariance at lags k=0,1,2
+
+Each $R^{xx}(k)$ has the shape (2,2), where:
+
+.. math::
+
+ R^{xx}_{00}(k) = E( Z_0(t)Z_0^*(t-k) )
+
+.. math::
+
+ R^{xx}_{01}(k) = E( Z_0(t)Z_1^*(t-k) )
+
+.. math::
+
+ R^{xx}_{10}(k) = E( Z_1(t)Z_0^*(t-k) )
+
+.. math::
+
+ R^{xx}_{11}(k) = E( Z_1(t)Z_1^*(t-k) )
+
+
+Where $E$ is the expected value and $^*$ marks the conjugate transpose. Thus, only $R^{xx}(0)$ is symmetric.
+
+This is calculated by using the function :func:`utils.autocov_vector`. Notice
+that the estimation is done for an assumed known process order. In practice, if
+the order of the process is unknown, we will have to use some criterion in
+order to choose an appropriate order, given the data.
+
+"""
+
+Rxx = np.empty((N, n_process, n_process, n_lags))
+
+for i in xrange(N):
+ Rxx[i] = utils.autocov_vector(z[i], nlags=n_lags)
+
+Rxx = Rxx.mean(axis=0)
+
+R0 = Rxx[..., 0]
+Rm = Rxx[..., 1:]
+
+Rxx = Rxx.transpose(2, 0, 1)
+
+
+"""
+
+We use the Levinson-Whittle(-Wiggins) and Robinson (LWR) algorithm, as
+described in [Morf1978]_, in order to estimate the MAR coefficients and the
+covariance matrix:
+
+"""
+
+a, ecov = alg.lwr_recursion(Rxx)
+
+"""
+
+Next, we use the calculated coefficients and covariance matrix, in order to
+calculate Granger 'causality':
+
+"""
+
+w, f_x2y, f_y2x, f_xy, Sw = alg.granger_causality_xy(a,
+ ecov,
+ n_freqs=n_freqs)
+
+
+"""
+
+This results in several different outputs, which we will proceed to plot.
+
+First, we will plot the estimated spectrum. This will be compared to two other
+estimates of the spectrum. The first is the 'true' spectrum, calculated from
+the known coefficients that generated the data:
+
+"""
+
+fig01 = plt.figure()
+ax01 = fig01.add_subplot(1, 1, 1)
+
+# This is the estimate:
+Sxx_est = np.abs(Sw[0, 0])
+Syy_est = np.abs(Sw[1, 1])
+
+# This is the 'true' value, corrected for one-sided spectral density functions
+Sxx_true = Sw_true[0, 0].real
+Syy_true = Sw_true[1, 1].real
+
+"""
+
+The other is an estimate based on a multi-taper spectral estimate from the
+empirical signals:
+
+"""
+
+# One row per realization (N of them), one column per frequency bin:
+c_x = np.empty((N, w.shape[0]))
+c_y = np.empty((N, w.shape[0]))
+
+for i in xrange(N):
+ frex, c_x[i], nu = alg.multi_taper_psd(z[i][0])
+ frex, c_y[i], nu = alg.multi_taper_psd(z[i][1])
+
+"""
+
+We plot these on the same axes, for a direct comparison:
+
+"""
+
+ax01.plot(w, Sxx_true, 'b', label='true Sxx(w)')
+ax01.plot(w, Sxx_est, 'b--', label='estimated Sxx(w)')
+ax01.plot(w, Syy_true, 'g', label='true Syy(w)')
+ax01.plot(w, Syy_est, 'g--', label='estimated Syy(w)')
+ax01.plot(w, np.mean(c_x, 0), 'r', label='Sxx(w) - MT PSD')
+ax01.plot(w, np.mean(c_y, 0), 'r--', label='Syy(w) - MT PSD')
+
+ax01.legend()
+
+"""
+
+.. image:: fig/ar_est_2vars_01.png
+
+Next, we plot the Granger 'causalities'. There are three of them: one for each
+direction of causality between the two processes (X => Y and Y => X). In
+addition, there is the instantaneous causality between the processes:
+
+"""
+
+fig02 = plt.figure()
+ax02 = fig02.add_subplot(1, 1, 1)
+
+# x causes y plot
+ax02.plot(w, f_x2y, label='X => Y')
+# y causes x plot
+ax02.plot(w, f_y2x, label='Y => X')
+# instantaneous causality
+ax02.plot(w, f_xy, label='X:Y')
+
+ax02.legend()
+
+"""
+
+.. image:: fig/ar_est_2vars_02.png
+
+
+Note that these results make intuitive sense when you look at the equations
+governing the mutual influences. X is entirely influenced by X (no effects of Y
+on X in :ref:`eq1`) and there is some influence of X on Y (:ref:`eq2`),
+resulting in this pattern.
+
+Finally, we calculate the total causality, which is the sum of all the above
+causalities. We compare this to the interdependence between the processes. This is the
+measure of total dependence and is closely akin to the coherence between the
+processes. We also compare to the empirically calculated coherence:
+
+"""
+
+fig03 = plt.figure()
+ax03 = fig03.add_subplot(1, 1, 1)
+
+# total causality
+ax03.plot(w, f_xy + f_x2y + f_y2x, label='Total causality')
+
+#Interdepence:
+f_id = alg.interdependence_xy(Sw)
+ax03.plot(w, f_id, label='Interdependence')
+
+# 33 matches the number of frequency bins returned by alg.coherence below:
+coh = np.empty((N, 33))
+
+for i in xrange(N):
+ frex, this_coh = alg.coherence(z[i])
+ coh[i] = this_coh[0, 1]
+
+ax03.plot(frex, np.mean(coh, axis=0), label='Coherence')
+
+ax03.legend()
+
+"""
+
+.. image:: fig/ar_est_2vars_03.png
+
+
+Finally, we call plt.show(), in order to show the figures:
+
+"""
+
+plt.show()
+
+"""
+
+
+.. [Ding2006] M. Ding, Y. Chen and S.L. Bressler (2006) Granger causality:
+ basic theory and application to neuroscience. In Handbook of Time Series
+ Analysis, ed. B. Schelter, M. Winterhalder, and J. Timmer, Wiley-VCH
+ Verlage, 2006: 451-474
+
+.. [Morf1978] M. Morf, A. Vieira and T. Kailath (1978) Covariance
+ Characterization by Partial Autocorrelation Matrices. The Annals of Statistics,
+ 6: 643-648
+
+
+"""
diff --git a/doc/examples/ar_est_3vars.py b/doc/examples/ar_est_3vars.py
new file mode 100644
index 0000000..7e21504
--- /dev/null
+++ b/doc/examples/ar_est_3vars.py
@@ -0,0 +1,254 @@
+"""
+
+.. _mar3:
+
+=====================================================
+ Multivariate auto-regressive modeling - 3 variables
+=====================================================
+
+This example is an extension of the example presented here: :ref:`mar`. Here,
+instead of 2 variables and the mutual influences between them, we map out the
+mutual interactions between three variables. This example follows closely an
+example provided in the paper by Ding, Chen and Bressler (2006) [Ding2006]_.
+
+
+Start with the necessary imports:
+
+"""
+
+import numpy as np
+import matplotlib.pyplot as pp
+
+import nitime.algorithms as alg
+import nitime.utils as utils
+
+
+"""
+
+Set the random seed:
+
+"""
+
+np.random.seed(1981)
+
+"""
+
+We simulate two multivariate autoregressive systems.
+
+The first is defined by the following equations:
+
+.. math::
+
+ X(t) = 0.8X_{t-1} - 0.5X_{t-2} + 0.4Z_{t-1} + \epsilon_x
+
+.. math::
+
+ Y(t) = 0.9Y_{t-1} - 0.8Y_{t-2} + \epsilon_y
+
+.. math::
+
+ Z(t) = 0.5Z_{t-1} - 0.2Z_{t-2} + 0.5Y_{t-1} + \epsilon_z
+
+"""
+
+
+cov = np.diag([0.3, 1.0, 0.2])
+
+
+a1 = -np.array([[0.8, 0.0, 0.4],
+ [0.0, 0.9, 0.0],
+ [0.0, 0.5, 0.5]])
+
+
+"""
+
+The second is defined by the following equations:
+
+.. math::
+
+ X(t) = 0.8X_{t-1} - 0.5X_{t-2} + 0.4Z_{t-1} + 0.2Y_{t-2} + \epsilon_x
+
+.. math::
+
+ Y(t) = 0.9Y_{t-1} - 0.8Y_{t-2} + \epsilon_y
+
+.. math::
+
+ Z(t) = 0.5Z_{t-1} -0.2Z_{t-2} + 0.5Y_{t-1} + \epsilon_z
+
+"""
+
+
+a2 = -np.array([[-0.5, 0.0, 0.0],
+ [0.0, -0.8, 0.0],
+ [0.0, 0.0, -0.2]])
+
+a = np.array([a1.copy(), a2.copy()])
+
+"""
+
+Add some feedback from Y to X at 2 lags:
+
+"""
+
+
+a2[0, 1] = -0.2
+
+b = np.array([a1.copy(), a2.copy()])
+
+
+def extract_ij(i, j, m):
+    # Extract the 2x2 autocovariance sub-sequence for the pair of channels
+    # (i, j) from an (n x n x lags) autocovariance array:
+    m_ij_rows = m[[i, j]]
+    return m_ij_rows[:, [i, j]]
+
+"""
+
+We calculate the transfer function based on the coefficients:
+
+"""
+
+w, Haw = alg.transfer_function_xy(a)
+w, Hbw = alg.transfer_function_xy(b)
+
+
+"""
+
+Generate 500 sets of 100 points
+
+"""
+
+
+N = 500
+L = 100
+
+
+"""
+
+Generate the instances of the time-series based on the coefficients:
+
+"""
+
+za = np.empty((N, 3, L))
+zb = np.empty((N, 3, L))
+ea = np.empty((N, 3, L))
+eb = np.empty((N, 3, L))
+for i in xrange(N):
+ za[i], ea[i] = utils.generate_mar(a, cov, L)
+ zb[i], eb[i] = utils.generate_mar(b, cov, L)
+
+"""
+
+We now try to estimate the 2nd order (m)AR coefficients by averaging together N
+estimates of the auto-covariance at lags k=0,1,2:
+
+"""
+
+Raxx = np.empty((N, 3, 3, 3))
+Rbxx = np.empty((N, 3, 3, 3))
+
+for i in xrange(N):
+ Raxx[i] = utils.autocov_vector(za[i], nlags=3)
+ Rbxx[i] = utils.autocov_vector(zb[i], nlags=3)
+
+
+"""
+
+Average the trials together to find the autocovariance estimate, and extract
+the pairwise components from the autocovariance sequence:
+
+"""
+
+Raxx = Raxx.mean(axis=0)
+xyRa = extract_ij(0, 1, Raxx)
+xzRa = extract_ij(0, 2, Raxx)
+yzRa = extract_ij(1, 2, Raxx)
+
+Rbxx = Rbxx.mean(axis=0)
+xyRb = extract_ij(0, 1, Rbxx)
+xzRb = extract_ij(0, 2, Rbxx)
+yzRb = extract_ij(1, 2, Rbxx)
+
+"""
+
+Now estimate mAR coefficients and covariance from the full and pairwise relationships:
+
+"""
+
+Raxx = Raxx.transpose(2, 0, 1)
+a_est, cov_est1 = alg.lwr_recursion(Raxx)
+a_xy_est, cov_xy_est1 = alg.lwr_recursion(xyRa.transpose(2, 0, 1))
+a_xz_est, cov_xz_est1 = alg.lwr_recursion(xzRa.transpose(2, 0, 1))
+a_yz_est, cov_yz_est1 = alg.lwr_recursion(yzRa.transpose(2, 0, 1))
+
+Rbxx = Rbxx.transpose(2, 0, 1)
+b_est, cov_est2 = alg.lwr_recursion(Rbxx)
+b_xy_est, cov_xy_est2 = alg.lwr_recursion(xyRb.transpose(2, 0, 1))
+b_xz_est, cov_xz_est2 = alg.lwr_recursion(xzRb.transpose(2, 0, 1))
+b_yz_est, cov_yz_est2 = alg.lwr_recursion(yzRb.transpose(2, 0, 1))
+
+
+"""
+
+We proceed to visualize these relationships:
+
+"""
+
+fig01 = pp.figure()
+
+w, x2y_a, y2x_a, _, _ = alg.granger_causality_xy(a_xy_est, cov_xy_est1)
+w, x2y_b, y2x_b, _, _ = alg.granger_causality_xy(b_xy_est, cov_xy_est2)
+ax01 = fig01.add_subplot(321)
+ax01.plot(w, x2y_a, 'b--')
+ax01.plot(w, x2y_b, 'b')
+ax01.set_title('x to y')
+ax01.set_ylim((0, 6))
+ax02 = fig01.add_subplot(322)
+ax02.plot(w, y2x_a, 'b--')
+ax02.plot(w, y2x_b, 'b')
+ax02.set_title('y to x')
+ax02.set_ylim((0, 6))
+
+w, y2z_a, z2y_a, _, _ = alg.granger_causality_xy(a_yz_est, cov_yz_est1)
+w, y2z_b, z2y_b, _, _ = alg.granger_causality_xy(b_yz_est, cov_yz_est2)
+ax03 = fig01.add_subplot(323)
+ax03.plot(w, y2z_a, 'b--')
+ax03.plot(w, y2z_b, 'b')
+ax03.set_title('y to z')
+ax03.set_ylim((0, 6))
+ax03 = fig01.add_subplot(324)
+ax03.plot(w, z2y_a, 'b--')
+ax03.plot(w, z2y_b, 'b')
+ax03.set_title('z to y')
+ax03.set_ylim((0, 6))
+
+w, x2z_a, z2x_a, _, _ = alg.granger_causality_xy(a_xz_est, cov_xz_est1)
+w, x2z_b, z2x_b, _, _ = alg.granger_causality_xy(b_xz_est, cov_xz_est2)
+ax04 = fig01.add_subplot(325)
+ax04.plot(w, x2z_a, 'b--')
+ax04.plot(w, x2z_b, 'b')
+ax04.set_title('x to z')
+ax04.set_ylim((0, 6))
+ax05 = fig01.add_subplot(326)
+ax05.plot(w, z2x_a, 'b--')
+ax05.plot(w, z2x_b, 'b')
+ax05.set_title('z to x')
+ax05.set_ylim((0, 6))
+
+pp.show()
+
+
+"""
+
+Compare to figure 3 in [Ding2006]_:
+
+.. image:: fig/ar_est_3vars_01.png
+
+
+
+.. [Ding2006] M. Ding, Y. Chen and S.L. Bressler (2006) Granger causality:
+ basic theory and application to neuroscience. In Handbook of Time Series
+ Analysis, ed. B. Schelter, M. Winterhalder, and J. Timmer, Wiley-VCH
+ Verlage, 2006: 451-474
+
+
+"""
diff --git a/doc/examples/ar_model_fit.py b/doc/examples/ar_model_fit.py
new file mode 100644
index 0000000..527dcb7
--- /dev/null
+++ b/doc/examples/ar_model_fit.py
@@ -0,0 +1,149 @@
+""" .. _model_fit:
+
+========================================
+Fitting an MAR model: analyzer interface
+========================================
+
+In this example, we will use the Analyzer interface to fit a multi-variate
+auto-regressive model with two time-series influencing each other.
+
+We start by importing 3rd party modules:
+
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+"""
+
+And then by importing the Granger analysis sub-module, which we will use for fitting the MAR
+model:
+
+"""
+
+import nitime.analysis.granger as gc
+
+"""
+
+The utils sub-module includes a function to generate auto-regressive processes
+based on provided coefficients:
+
+"""
+
+import nitime.utils as utils
+
+
+"""
+
+Generate some MAR processes (according to Ding, Chen and Bressler [Ding2006]_):
+
+"""
+
+a1 = np.array([[0.9, 0],
+ [0.16, 0.8]])
+
+a2 = np.array([[-0.5, 0],
+ [-0.2, -0.5]])
+
+am = np.array([-a1, -a2])
+
+x_var = 1
+y_var = 0.7
+xy_cov = 0.4
+cov = np.array([[x_var, xy_cov],
+ [xy_cov, y_var]])
+
+
+"""
+
+Number of realizations of the process
+
+"""
+
+N = 500
+
+"""
+
+Length of each realization:
+
+"""
+
+L = 1024
+
+order = am.shape[0]
+n_lags = order + 1
+
+n_process = am.shape[-1]
+
+z = np.empty((N, n_process, L))
+nz = np.empty((N, n_process, L))
+
+np.random.seed(1981)
+for i in xrange(N):
+ z[i], nz[i] = utils.generate_mar(am, cov, L)
+
+
+"""
+
+We start by estimating the order of the model from the data:
+
+"""
+
+est_order = []
+for i in xrange(N):
+ this_order, this_Rxx, this_coef, this_ecov = gc.fit_model(z[i][0], z[i][1])
+ est_order.append(this_order)
+
+order = int(np.round(np.mean(est_order)))
+
+"""
+
+Once we have estimated the order, we go ahead and fit each realization of the
+MAR model, constraining the model order accordingly (by setting the order
+key-word argument) to be always equal to the model order estimated above.
+
+"""
+
+Rxx = np.empty((N, n_process, n_process, n_lags))
+coef = np.empty((N, n_process, n_process, order))
+ecov = np.empty((N, n_process, n_process))
+
+for i in xrange(N):
+ this_order, this_Rxx, this_coef, this_ecov = gc.fit_model(z[i][0], z[i][1], order=order)
+ Rxx[i] = this_Rxx
+ coef[i] = this_coef
+ ecov[i] = this_ecov
+
+"""
+
+We generate a time-series from the recovered coefficients, using the same
+randomization seed as the first MAR process. The two should look pretty similar to each other:
+
+"""
+
+np.random.seed(1981)
+est_ts, _ = utils.generate_mar(np.mean(coef, axis=0), np.mean(ecov, axis=0), L)
+
+fig01 = plt.figure()
+ax = fig01.add_subplot(1, 1, 1)
+
+ax.plot(est_ts[0][0:100])
+ax.plot(z[0][0][0:100], 'g--')
+
+"""
+
+.. image:: fig/ar_model_fit_01.png
+
+
+"""
+
+plt.show()
+
+"""
+
+.. [Ding2006] M. Ding, Y. Chen and S.L. Bressler (2006) Granger causality:
+ basic theory and application to neuroscience. In Handbook of Time Series
+ Analysis, ed. B. Schelter, M. Winterhalder, and J. Timmer, Wiley-VCH
+ Verlage, 2006: 451-474
+
+"""
diff --git a/doc/examples/event_related_fmri.py b/doc/examples/event_related_fmri.py
new file mode 100644
index 0000000..1aaa597
--- /dev/null
+++ b/doc/examples/event_related_fmri.py
@@ -0,0 +1,147 @@
+"""
+
+.. _et-fmri:
+
+==================
+Event-related fMRI
+==================
+
+Extracting the average time-series from one signal, time-locked to the
+occurrence of some type of event in another signal, is a very typical operation
+in the analysis of time-series from neuroscience experiments. Therefore, we
+have an additional example of this kind of analysis in :ref:`grasshopper`
+
+The following example is taken from an fMRI experiment in which a subject was
+viewing a motion stimulus, while fMRI BOLD was recorded. The time-series in
+this data set were extracted from motion-sensitive voxels near area MT (a
+region containing motion-sensitive cells) in this subject's brain. Six different
+kinds of trials could occur in this experiment (designating different
+directions and locations of motion). The following example shows the extraction
+of the time-dependent responses of the voxels in this region to the different
+stimuli.
+
+We start by importing modules/functions used and define some variables we will
+use in the analysis:
+
+"""
+
+import os
+
+from matplotlib.mlab import csv2rec
+import matplotlib.pyplot as plt
+
+import nitime
+import nitime.timeseries as ts
+import nitime.analysis as nta
+import nitime.viz as viz
+
+TR = 2.
+len_et = 15 # This is given in number of samples, not time!
+
+"""
+
+Next, we load the data into a recarray from the csv file, using csv2rec
+
+"""
+
+data_path = os.path.join(nitime.__path__[0], 'data')
+
+data = csv2rec(os.path.join(data_path, 'event_related_fmri.csv'))
+
+
+"""
+
+We initialize TimeSeries objects with the data and the TR:
+
+One TimeSeries is initialized for the BOLD data:
+"""
+
+t1 = ts.TimeSeries(data.bold, sampling_interval=TR)
+
+"""
+
+And another one for the events (the different stimuli):
+
+"""
+
+t2 = ts.TimeSeries(data.events, sampling_interval=TR)
+
+"""
+
+Note that this example uses the EventRelated analyzer (also used in the
+:ref:`grasshopper` example), but here, instead of providing an :class:`Events`
+object as input, another :class:`TimeSeries` object is provided, containing an
+equivalent time-series with the same dimensions as the time-series on which the
+analysis is done, with '0' wherever no event of interest occurred and an
+integer wherever an event of interest occurred (different sequential integers
+for the different kinds of events).
+
+"""
+
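+# A quick, optional look at the event codes (an aside for illustration): the
+# events time-series is mostly zeros, with small integers marking the onsets of
+# the different trial types.
+print(sorted(set(data.events)))
+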
+E = nta.EventRelatedAnalyzer(t1, t2, len_et)
+
+"""
+
+Two different methods of the EventRelatedAnalyzer are used: :attr:`E.eta`
+refers to the event-triggered average of the activity and :attr:`E.ets` refers
+to the event-triggered standard error of the mean (where the degrees of freedom
+are set by the number of trials). Note that you can also extract the
+event-triggered data itself as a list, by referring instead to
+:attr:`E.et_data`.
+
+We pass the eta and ets calculations straight into the visualization function,
+which plots the result:
+
+"""
+
+fig01 = viz.plot_tseries(E.eta, ylabel='BOLD (% signal change)', yerror=E.ets)
+
+"""
+
+.. image:: fig/event_related_fmri_01.png
+
+
+In the following example an alternative approach is taken to calculating
+the event-related activity, based on the finite impulse-response
+model (see [Burock2000]_ for details).
+
+
+"""
+
+fig02 = viz.plot_tseries(E.FIR, ylabel='BOLD (% signal change)')
+
+
+"""
+
+.. image:: fig/event_related_fmri_02.png
+
+Yet another method is based on a cross-correlation performed in the frequency
+domain (thanks to Lavi Secundo for providing a previous implementation of this
+idea). This method can speed up calculation substantially for long time-series,
+because the calculation is done using a vector multiplication in the frequency
+domain representation of the time-series, instead of a more computationally
+expensive convolution-like operation.
+
+"""
+
+fig03 = viz.plot_tseries(E.xcorr_eta, ylabel='BOLD (% signal change)')
+
+
+"""
+
+.. image:: fig/event_related_fmri_03.png
+
+
+We call plt.show() in order to display all the figures:
+"""
+
+plt.show()
+
+"""
+
+.. [Burock2000] M.A. Burock and A.M.Dale (2000). Estimation and Detection of
+ Event-Related fMRI Signals with Temporally Correlated Noise: A
+ Statistically Efficient and Unbiased Approach. Human Brain Mapping,
+ 11:249-260
+
+"""
diff --git a/doc/examples/filtering_fmri.py b/doc/examples/filtering_fmri.py
new file mode 100644
index 0000000..39cd99b
--- /dev/null
+++ b/doc/examples/filtering_fmri.py
@@ -0,0 +1,436 @@
+"""
+
+.. _filter-fmri:
+
+===================================
+Filtering and normalizing fMRI data
+===================================
+
+Filtering fMRI data is very important. The time-series usually collected in
+fMRI contain a broad-band signal. However, physiologically relevant signals are
+thought to be present in only particular parts of the spectrum. For this
+reason, filtering operations, such as detrending, are a common pre-processing
+step in the analysis of fMRI data. In addition, for many fMRI analyses, it
+is important to normalize the data in each voxel. This is because data may
+differ between different voxels for 'uninteresting' reasons, such as local
+blood-flow differences and signal amplitude differences due to the distance
+from the receive coil. In the following, we will demonstrate usage of nitime's
+analyzer objects for spectral estimation, filtering and normalization on fMRI
+data.
+
+
+We start by importing the needed modules. First modules from the standard lib
+and from 3rd parties:
+
+"""
+
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.mlab import csv2rec
+
+
+"""
+
+Next, the particular nitime classes we will be using in this example:
+
+"""
+
+import nitime
+
+# Import the time-series objects:
+from nitime.timeseries import TimeSeries
+
+# Import the analysis objects:
+from nitime.analysis import SpectralAnalyzer, FilterAnalyzer, NormalizationAnalyzer
+
+"""
+
+For starters, let's analyze data that has been preprocessed and is extracted
+into individual ROIs. This is the same data used in :ref:`multi-taper-coh` and
+in :ref:`resting-state` (see these examples for details).
+
+We start by setting the TR and reading the data from the CSV table into which
+the data was saved:
+
+"""
+
+TR = 1.89
+
+data_path = os.path.join(nitime.__path__[0], 'data')
+
+data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+
+# Extract ROI information from the csv file headers:
+roi_names = np.array(data_rec.dtype.names)
+
+# This is the number of samples in each ROI:
+n_samples = data_rec.shape[0]
+
+# Make an empty container for the data
+data = np.zeros((len(roi_names), n_samples))
+
+# Insert the data from each ROI into a row in the data:
+for n_idx, roi in enumerate(roi_names):
+ data[n_idx] = data_rec[roi]
+
+# Initialize TimeSeries object:
+T = TimeSeries(data, sampling_interval=TR)
+T.metadata['roi'] = roi_names
+
+
+"""
+
+We will start, by examining the spectrum of the original data, before
+filtering. We do this by initializing a SpectralAnalyzer for the original data:
+
+"""
+
+S_original = SpectralAnalyzer(T)
+
+# Initialize a figure to put the results into:
+fig01 = plt.figure()
+ax01 = fig01.add_subplot(1, 1, 1)
+
+
+"""
+
+The spectral analyzer has several different methods of spectral analysis;
+however, they all have a common API. This means that for all of them the output
+is a tuple. The first item in the tuple is the array of central frequencies of
+the frequency bins in the spectrum, and the second item is the magnitude of the
+spectrum in each of these frequency bins. For the purpose of this
+example, we will only plot the data from the 10th ROI (by indexing into the
+spectra). We compare all the methods of spectral estimation by plotting them
+together:
+
+"""
+
+ax01.plot(S_original.psd[0],
+ S_original.psd[1][9],
+ label='Welch PSD')
+
+ax01.plot(S_original.spectrum_fourier[0],
+ np.abs(S_original.spectrum_fourier[1][9]),
+ label='FFT')
+
+ax01.plot(S_original.periodogram[0],
+ S_original.periodogram[1][9],
+ label='Periodogram')
+
+ax01.plot(S_original.spectrum_multi_taper[0],
+ S_original.spectrum_multi_taper[1][9],
+ label='Multi-taper')
+
+ax01.set_xlabel('Frequency (Hz)')
+ax01.set_ylabel('Power')
+
+ax01.legend()
+
+
+"""
+
+.. image:: fig/filtering_fmri_01.png
+
+
+Notice that, for this data, simply extracting an FFT is hardly informative (the
+reasons for that are explained in :ref:`multi-taper-psd`). On the other hand,
+the other methods provide different granularity of information, traded-off with
+the robustness of the estimation. The cadillac of spectral estimates is the
+multi-taper estimation, which provides both robustness and granularity, but
+notice that this estimate requires more computation than other estimates
+(certainly more computation than the FFT).
+
+We note that a lot of the power in the fMRI data seems to be concentrated in
+frequencies below 0.02 Hz. These extremely low-frequency fluctuations in the
+signal are often considered to be 'noise', rather than reflecting neural
+processing. In addition, there is a broad distribution of power up to the
+Nyquist frequency. However, some estimates of the hemodynamic response suggest
+that signals above 0.15 Hz could not reflect neural activity, because of the
+slow filtering of the neural response by the hemodynamics underlying the BOLD
+signal measured in fMRI. Thus, it would be advantageous to remove
+fluctuations below 0.02 and above 0.15 Hz from the data. Next, we proceed to
+filter the data into this range, using different methods.
+
+We start by initializing a FilterAnalyzer. This is initialized with the
+time-series containing the data and with the upper and lower bounds of the
+range into which we wish to filter (in Hz):
+
+"""
+
+F = FilterAnalyzer(T, ub=0.15, lb=0.02)
+
+# Initialize a figure to display the results:
+fig02 = plt.figure()
+ax02 = fig02.add_subplot(1, 1, 1)
+
+# Plot the original, unfiltered data:
+ax02.plot(F.data[0], label='unfiltered')
+
+"""
+
+As with the SpectralAnalyzer, there is a common API for the different methods
+used for filtering. We use the following methods:
+
+- Boxcar filter: The time-series is convolved with a box-car function whose
+  length is chosen so that frequencies higher than those represented by the
+  box-car are no longer present in the smoothed version of the time-series.
+  This acts as a low-pass filter. The data can then be high-pass filtered by
+  subtracting this smoothed version from the original. For a band-pass filter,
+  both of these operations are done (a stand-alone sketch of this idea appears
+  after the plot below).
+
+"""
+
+ax02.plot(F.filtered_boxcar.data[0], label='Boxcar filter')
+
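+"""
+
+For illustration, the same idea written out by hand with plain NumPy (a sketch
+only; the window length below is a hypothetical choice, whereas the
+FilterAnalyzer derives its box-car length from the requested cut-off
+frequencies):
+
+"""
+
+win = 10  # hypothetical box-car length, in samples
+boxcar_kernel = np.ones(win) / win
+# Convolving with the box-car smooths the data (low-pass):
+lowpassed = np.convolve(T.data[0], boxcar_kernel, mode='same')
+# Subtracting the smoothed version from the original high-passes it:
+highpassed = T.data[0] - lowpassed
+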
+"""
+
+- FIR filter: A digital filter with a finite impulse response. These filters
+  have an order of 64 by default, but that can be adjusted by setting the key
+ word argument 'filt_order', passed to initialize the FilterAnalyzer. For
+ FIR filtering, :mod:`nitime` uses a Hamming window filter, but this can also
+ be changed by setting the key word argument 'fir_win'.
+ As with the boxcar filter, if band-pass filtering is required, a low-pass
+ filter is applied and then a high-pass filter is applied to the resulting
+ time-series.
+
+"""
+
+ax02.plot(F.fir.data[0], label='FIR')
+
+"""
+
+- IIR filter: A digital filter with an infinite impulse response function. By
+  default an elliptic filter is used here, but this can be changed by setting
+ the 'iir_type' key word argument used when initializing the FilterAnalyzer.
+
+For both FIR filters and IIR filters, :func:`scipy.signal.filtfilt` is used in
+order to achieve zero phase delay filtering.
+
+"""
+
+ax02.plot(F.iir.data[0], label='IIR')
+
+"""
+
+- Fourier filter: this is a quick and dirty filter. The data is FFT-ed into the
+ frequency domain. The power in the unwanted frequency bins is removed (by
+ replacing the power in these bins with zero) and the data is IFFT-ed back
+ into the time-domain.
+
+"""
+
+ax02.plot(F.filtered_fourier.data[0], label='Fourier')
+ax02.legend()
+ax02.set_xlabel('Time (TR)')
+ax02.set_ylabel('Signal amplitude (a.u.)')
+
+"""
+
+.. image:: fig/filtering_fmri_02.png
+
+
+Examining the resulting time-series closely reveals that large fluctuations in
+very slow frequencies have been removed, but also small fluctuations in high
+frequencies have been attenuated through filtering.
+
+Comparing the resulting spectra of these different filters shows the various
+trade-offs of each filtering method, including the fidelity with which the
+original spectrum is replicated within the pass-band and the amount of
+attenuation within the stop-bands.
+
+We can do that by initializing a SpectralAnalyzer for each one of the filtered
+time-series resulting from the above operations and plotting their spectra. For
+ease of comparison, we only plot the spectra using the multi-taper spectral
+estimation. At the level of granularity provided by this method, the
+differences between the methods are emphasized:
+
+"""
+
+S_fourier = SpectralAnalyzer(F.filtered_fourier)
+S_boxcar = SpectralAnalyzer(F.filtered_boxcar)
+S_fir = SpectralAnalyzer(F.fir)
+S_iir = SpectralAnalyzer(F.iir)
+
+fig03 = plt.figure()
+ax03 = fig03.add_subplot(1, 1, 1)
+
+ax03.plot(S_original.spectrum_multi_taper[0],
+ S_original.spectrum_multi_taper[1][9],
+ label='Original')
+
+ax03.plot(S_fourier.spectrum_multi_taper[0],
+ S_fourier.spectrum_multi_taper[1][9],
+ label='Fourier')
+
+ax03.plot(S_boxcar.spectrum_multi_taper[0],
+ S_boxcar.spectrum_multi_taper[1][9],
+ label='Boxcar')
+
+ax03.plot(S_fir.spectrum_multi_taper[0],
+ S_fir.spectrum_multi_taper[1][9],
+ label='FIR')
+
+ax03.plot(S_iir.spectrum_multi_taper[0],
+ S_iir.spectrum_multi_taper[1][9],
+ label='IIR')
+
+ax03.legend()
+
+
+"""
+
+.. image:: fig/filtering_fmri_03.png
+
+
+Next, we turn to normalizing the filtered data. This can be done in one of two
+methods:
+
+- Percent change: the data in each voxel is normalized as percent signal
+ change, relative to the mean BOLD signal in the voxel
+
+- Z score: The data in each voxel is normalized to have 0 mean and a standard
+ deviation of 1.
+
+We will use the filtered data, in order to demonstrate how the output of one
+analyzer can be used as the input to the other:
+
+"""
+
+fig04 = plt.figure()
+ax04 = fig04.add_subplot(1, 1, 1)
+
+ax04.plot(NormalizationAnalyzer(F.fir).percent_change.data[0], label='% change')
+ax04.plot(NormalizationAnalyzer(F.fir).z_score.data[0], label='Z score')
+ax04.legend()
+ax04.set_xlabel('Time (TR)')
+ax04.set_ylabel('Amplitude (% change or Z-score)')
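+
+"""
+
+For illustration, the same two normalizations written out by hand with plain
+NumPy (a sketch only; the NormalizationAnalyzer above operates on TimeSeries
+objects and is the recommended interface):
+
+"""
+
+fir_data = F.fir.data
+voxel_means = fir_data.mean(axis=-1)[:, None]
+# Percent signal change relative to each voxel's mean:
+pct_change = 100 * (fir_data - voxel_means) / voxel_means
+# Z score: remove each voxel's mean and divide by its standard deviation:
+z_score = (fir_data - voxel_means) / fir_data.std(axis=-1)[:, None]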
+
+"""
+
+.. image:: fig/filtering_fmri_04.png
+
+
+Notice that the same methods of filtering and normalization can be applied to
+fMRI data, upon reading it from a nifti file, using :mod:`nitime.fmri.io`.
+
+We demonstrate that in what follows. [Notice that nibabel
+(http://nipy.org/nibabel) is required in order to run the following
+examples. An error will be thrown if nibabel is not installed.]
+
+"""
+
+try:
+ from nibabel import load
+except ImportError:
+    raise ImportError('You need nibabel (http://nipy.org/nibabel/) in order to run this example')
+
+import nitime.fmri.io as io
+
+"""
+
+We define the TR of the analysis and the frequency band of interest:
+
+"""
+
+TR = 1.35
+f_lb = 0.02
+f_ub = 0.15
+
+
+"""
+
+An fMRI data file with some fMRI data is shipped as part of the distribution,
+the following line will find the path to this data on the specific computer:
+
+"""
+
+data_file_path = os.path.join(nitime.__path__[0], 'data')
+
+fmri_file = os.path.join(data_file_path, 'fmri1.nii.gz')
+
+
+"""
+
+Read in the dimensions of the data, using nibabel:
+
+"""
+
+fmri_data = load(fmri_file)
+volume_shape = fmri_data.shape[:-1]
+coords = list(np.ndindex(volume_shape))
+coords = np.array(coords).T
+
+
+"""
+
+We use :mod:`nitime.fmri.io` in order to generate a TimeSeries object from spatial
+coordinates in the data file. Notice that the normalization method is provided as a
+string input to the keyword argument 'normalize' and the filter and its
+properties are provided as a dict to the keyword argument 'filter':
+
+"""
+
+T_unfiltered = io.time_series_from_file(fmri_file,
+ coords,
+ TR=TR,
+ normalize='percent')
+
+T_fir = io.time_series_from_file(fmri_file,
+ coords,
+ TR=TR,
+ normalize='percent',
+ filter=dict(lb=f_lb,
+ ub=f_ub,
+ method='fir',
+ filt_order=10))
+
+T_iir = io.time_series_from_file(fmri_file,
+ coords,
+ TR=TR,
+ normalize='percent',
+ filter=dict(lb=f_lb,
+ ub=f_ub,
+ method='iir',
+ filt_order=10))
+
+T_boxcar = io.time_series_from_file(fmri_file,
+ coords,
+ TR=TR,
+ normalize='percent',
+ filter=dict(lb=f_lb,
+ ub=f_ub,
+ method='boxcar',
+ filt_order=10))
+
+fig05 = plt.figure()
+ax05 = fig05.add_subplot(1, 1, 1)
+S_unfiltered = SpectralAnalyzer(T_unfiltered).spectrum_multi_taper
+S_fir = SpectralAnalyzer(T_fir).spectrum_multi_taper
+S_iir = SpectralAnalyzer(T_iir).spectrum_multi_taper
+S_boxcar = SpectralAnalyzer(T_boxcar).spectrum_multi_taper
+
+random_voxel = np.random.randint(0, np.prod(volume_shape))
+
+ax05.plot(S_unfiltered[0], S_unfiltered[1][random_voxel], label='Unfiltered')
+ax05.plot(S_fir[0], S_fir[1][random_voxel], label='FIR filtered')
+ax05.plot(S_iir[0], S_iir[1][random_voxel], label='IIR filtered')
+ax05.plot(S_boxcar[0], S_boxcar[1][random_voxel], label='Boxcar filtered')
+ax05.legend()
+
+"""
+
+.. image:: fig/filtering_fmri_05.png
+
+
+Notice that although the boxcar filter usually does not do an amazing job with
+long time-series, and IIR/FIR filters tend to be superior in those cases, in
+this example, where the time-series is much shorter, it sometimes does a
+relatively decent job.
+
+We call plt.show() in order to display the figure:
+
+"""
+
+plt.show()
diff --git a/doc/examples/granger_fmri.py b/doc/examples/granger_fmri.py
new file mode 100644
index 0000000..8c2b958
--- /dev/null
+++ b/doc/examples/granger_fmri.py
@@ -0,0 +1,226 @@
+"""
+
+.. _gc-fmri:
+
+================================
+Granger 'causality' of fMRI data
+================================
+
+Granger 'causality' analysis provides an asymmetric measure of the coupling
+between two time-series. When discussing this analysis method, we will put the
+word 'causality' in single quotes, as we believe that use of this word outside
+of quotes should be reserved for particular circumstances, often not fulfilled
+in the analysis of simultaneously recorded neuroscientific time-series (see
+[Pearl2009]_ for an extensive discussion of this distinction).
+
+The central idea behind this analysis is that time-series can be described in
+terms of a time-delayed auto-regressive model of the form:
+
+.. math::
+
+ x_t = \sum_{i=1}^{n}a_i x_{t-i} + \epsilon_t
+
+Here, the past behaviour of a single time-series is used in order to predict
+the current value of the time-series. In Granger 'causality' analysis, we test
+whether the addition of a prediction of the time-series from another
+time-series through a multi-variate auto-regressive model may improve our
+prediction of the present behavior of the time-series (reducing the value of
+the error term $\epsilon_t$):
+
+.. math::
+
+  x_t = \sum_{i=1}^{n}(a_i x_{t-i} + b_i y_{t-i}) + \epsilon_t
+
+
+In our implementation of the algorithms used for this analysis, we follow
+closely the description put forth by Ding et al. ([Ding2006]_). Also, see
+:ref:`mar` and :ref:`ar` for examples even more closely modeled on the
+examples mentioned in their paper.
+
+Here, we will demonstrate the use of Granger 'causality' analysis with fMRI
+data. The data is provided as part of the distribution and is taken from a
+'resting state' scan. The data was motion corrected and averaged from several
+ROIs.
+
+We start by importing the needed modules:
+
+"""
+
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.mlab import csv2rec
+
+import nitime
+import nitime.analysis as nta
+import nitime.timeseries as ts
+import nitime.utils as tsu
+from nitime.viz import drawmatrix_channels
+
+"""
+
+We then define a few parameters of the data: the TR and the bounds on the
+frequency band of interest.
+
+"""
+
+TR = 1.89
+f_ub = 0.15
+f_lb = 0.02
+
+"""
+
+We read in the resting state fMRI data into a recarray from a csv file:
+
+"""
+
+data_path = os.path.join(nitime.__path__[0], 'data')
+
+data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+
+roi_names = np.array(data_rec.dtype.names)
+nseq = len(roi_names)
+n_samples = data_rec.shape[0]
+data = np.zeros((nseq, n_samples))
+
+for n_idx, roi in enumerate(roi_names):
+ data[n_idx] = data_rec[roi]
+
+"""
+
+We normalize the data in each of the ROIs to be in units of % change and
+initialize the TimeSeries object:
+
+"""
+
+pdata = tsu.percent_change(data)
+time_series = ts.TimeSeries(pdata, sampling_interval=TR)
+
+"""
+
+We initialize the GrangerAnalyzer object, while specifying the order of the
+autoregressive model to be 1 (predict the current behavior of the time-series
+based on one time-point back).
+
+"""
+
+G = nta.GrangerAnalyzer(time_series, order=1)
+
+"""
+
+For comparison, we also initialize a CoherenceAnalyzer and a
+CorrelationAnalyzer, with the same TimeSeries object
+
+"""
+
+C1 = nta.CoherenceAnalyzer(time_series)
+C2 = nta.CorrelationAnalyzer(time_series)
+
+"""
+
+We are only interested in the physiologically relevant frequency band
+(approximately 0.02 to 0.15 Hz).
+
+The spectral resolution is different in these two different analyzers. In the
+CoherenceAnalyzer, the spectral resolution depends on the size of the window
+used for calculating the spectral density and cross-spectrum, whereas in the
+GrangerAnalyzer it is derived from the order of the MAR model, which is set by
+the user.
+
+For this reason, the indices used to access the relevant part of the spectrum
+will be different in the different analyzers.
+
+"""
+
+freq_idx_G = np.where((G.frequencies > f_lb) * (G.frequencies < f_ub))[0]
+freq_idx_C = np.where((C1.frequencies > f_lb) * (C1.frequencies < f_ub))[0]
+
+
+"""
+
+We plot the 'causality' from x to y ($F_{x \rightarrow y}$) and from y to x
+($F_{y \rightarrow x}$) for the first two ROIs and compare to the coherence
+between these two time-series:
+
+"""
+
+coh = np.mean(C1.coherence[:, :, freq_idx_C], -1) # Averaging on the last dimension
+g1 = np.mean(G.causality_xy[:, :, freq_idx_G], -1)
+
+fig01 = drawmatrix_channels(coh, roi_names, size=[10., 10.], color_anchor=0)
+
+"""
+
+.. image:: fig/granger_fmri_01.png
+
+"""
+
+fig02 = drawmatrix_channels(C2.corrcoef, roi_names, size=[10., 10.], color_anchor=0)
+
+"""
+
+.. image:: fig/granger_fmri_02.png
+
+"""
+
+fig03 = drawmatrix_channels(g1, roi_names, size=[10., 10.], color_anchor=0)
+
+"""
+
+.. image:: fig/granger_fmri_03.png
+
+Differences in the HRF between different ROIs are a potential source of
+misattribution of the direction and magnitude of dependence between time-series
+in fMRI data (for a particularly extreme example of that see
+[David2008]_). Therefore, as suggested by Roebroeck et al. [Roebroeck2005]_ and
+[Kayser2009]_ we turn to examine the difference between $F_{x\rightarrow y}$ and
+$F_{y\rightarrow x}$
+
+"""
+
+g2 = np.mean(G.causality_xy[:, :, freq_idx_G] - G.causality_yx[:, :, freq_idx_G], -1)
+fig04 = drawmatrix_channels(g2, roi_names, size=[10., 10.], color_anchor=0)
+
+"""
+
+.. image:: fig/granger_fmri_04.png
+
+If these values are found to be significantly different from 0, this
+constitutes evidence for a correlation with a time-lag between the
+regions. This is a necessary (though not necessarily sufficient...) condition
+for establishing functional connectivity between the regions.
+
+Finally, we call plt.show(), to show the plots created:
+
+"""
+
+plt.show()
+
+"""
+
+References
+----------
+
+.. [Pearl2009] J. Pearl (2009). Causal inference in statistics: An
+ overview. Statistics surveys 3: 96-146.
+
+.. [Ding2006] M. Ding, Y. Chen, S.L. Bressler (2006) Granger causality:
+ basic theory and application to neuroscience. In Handbook of Time Series
+ Analysis, ed. B. Schelter, M. Winterhalder, and J. Timmer, Wiley-VCH
+ Verlage, 2006: 451-474
+
+.. [Roebroeck2005] A. Roebroeck, E., Formisano R. Goebel (2005). Mapping
+ directed influence over the brain using Granger causality and
+ fMRI. NeuroImage 25: 230-242.
+
+.. [Kayser2009] A. Kayser, F. Sun, M. D'Esposito (2009). A comparison of
+ Granger causality and coherency in fMRI-based analysis of the motor
+ system. NeuroImage 30: 3475-94
+
+.. [David2008] O. David, I. Guillemain, S. Saillet, S. Reyt, C. Deransart,
+ C. Segebarth, A. Depaulis (2008). Identifying neural drivers with functional
+ MRI: An electrophysiological validation. PLoS Biol 6:e315
+
+
+"""
diff --git a/doc/examples/grasshopper.py b/doc/examples/grasshopper.py
new file mode 100644
index 0000000..38b6fff
--- /dev/null
+++ b/doc/examples/grasshopper.py
@@ -0,0 +1,221 @@
+"""
+
+.. _grasshopper:
+
+
+=====================================
+ Auditory processing in grasshoppers
+=====================================
+
+Extracting the average time-series from one signal, time-locked to the
+occurrence of some type of event in another signal, is a very typical operation in
+the analysis of time-series from neuroscience experiments. Therefore, we have
+an additional example of this kind of analysis in :ref:`et-fmri`
+
+
+In the following code-snippet, we demonstrate the calculation of the
+spike-triggered average (STA). This is the average of the stimulus wave-form
+preceding the emission of a spike in the neuron and can be thought of as the
+stimulus 'preferred' by this neuron.
+
+We start by importing the required modules:
+"""
+
+import os
+
+import numpy as np
+
+import nitime
+import nitime.timeseries as ts
+import nitime.analysis as tsa
+import nitime.viz as viz
+from matplotlib import pyplot as plt
+
+"""
+
+Two data files are used in this example. The first contains the times of action
+potentials ('spikes'), recorded intra-cellularly from primary auditory
+receptors in the grasshopper *Locusta migratoria*.
+
+We read in these times and initialize an Events object from them. The
+spike-times are given in micro-seconds:
+
+"""
+
+data_path = os.path.join(nitime.__path__[0], 'data')
+
+spike_times = np.loadtxt(os.path.join(data_path, 'grasshopper_spike_times1.txt'))
+
+spike_ev = ts.Events(spike_times, time_unit='us')
+
+
+"""
+
+The second data file contains the stimulus that was played during the
+recording. Briefly, the stimulus played was a pure-tone in the cell's preferred
+frequency amplitude modulated by Gaussian white-noise, up to a cut-off
+frequency (200 Hz in this case, for details on the experimental procedures and
+the stimulus see [Rokem2006]_).
+
+"""
+
+stim = np.loadtxt(os.path.join(data_path, 'grasshopper_stimulus1.txt'))
+
+
+"""
+
+The stimulus needs to be transformed from Volts to dB:
+
+"""
+
+
+def volt2dB(stim, maxdB=100):
+    # 20 / ln(10) * ln(V / 2e-5) is 20 * log10(V / 2e-5), i.e. dB relative to
+    # the standard 20 uPa reference; the result is then shifted so that the
+    # loudest value maps to maxdB:
+    stim = (20 * 1 / np.log(10)) * (np.log(stim[:, 1] / 2.0e-5))
+    return maxdB - stim.max() + stim
+
+stim = volt2dB(stim, maxdB=76.4286) # maxdB taken from the spike file header
+
+
+"""
+
+We create a time-series object for the stimulus, which was sampled at 20 kHz:
+
+"""
+
+stim_time_series = ts.TimeSeries(t0=0,
+ data=stim,
+ sampling_interval=50,
+ time_unit='us')
+
+"""
+
+Note that the time-representation will not change if we now convert the
+time-unit into ms. The only thing this accomplishes is to use this time-unit in
+subsequent visualization of the resulting time-series
+
+"""
+
+stim_time_series.time_unit = 'ms'
+
+"""
+
+Next, we initialize an EventRelatedAnalyzer:
+
+"""
+
+event_related = tsa.EventRelatedAnalyzer(stim_time_series,
+ spike_ev,
+ len_et=200,
+ offset=-200)
+
+"""
+
+The actual STA gets calculated in this line (the call to 'event_related.eta')
+and the result gets input directly into the plotting function:
+
+"""
+
+fig01 = viz.plot_tseries(event_related.eta, ylabel='Amplitude (dB SPL)')
+
+"""
+
+We prettify the plot a bit by adding a dashed line at the mean of the stimulus
+
+"""
+
+ax = fig01.get_axes()[0]
+xlim = ax.get_xlim()
+ylim = ax.get_ylim()
+mean_stim = np.mean(stim_time_series.data)
+ax.plot([xlim[0], xlim[1]], [mean_stim, mean_stim], 'k--')
+
+
+"""
+
+.. image:: fig/grasshopper_01.png
+
+In the following example, a second channel has been added to both the stimulus
+and the spike-train time-series. This is the response of the same cell, to a
+different stimulus, in which the frequency modulation has a higher frequency
+cut-off (800 Hz).
+
+"""
+
+
+stim2 = np.loadtxt(os.path.join(data_path, 'grasshopper_stimulus2.txt'))
+stim2 = volt2dB(stim2, maxdB=76.4286)
+spike_times2 = np.loadtxt(os.path.join(data_path, 'grasshopper_spike_times2.txt'))
+
+
+"""
+
+
+We loop over the two spike-time events and stimulus time-series:
+
+
+"""
+
+
+et = []
+means = []
+for stim, spike in zip([stim, stim2], [spike_times, spike_times2]):
+ stim_time_series = ts.TimeSeries(t0=0, data=stim, sampling_interval=50,
+ time_unit='us')
+
+ stim_time_series.time_unit = 'ms'
+
+ spike_ev = ts.Events(spike, time_unit='us')
+ #Initialize the event-related analyzer
+ event_related = tsa.EventRelatedAnalyzer(stim_time_series,
+ spike_ev,
+ len_et=200,
+ offset=-200)
+
+ """
+
+ This is the line which actually executes the analysis
+
+ """
+
+ et.append(event_related.eta)
+ means.append(np.mean(stim_time_series.data))
+
+"""
+
+Stack the data from both time-series, initialize a new time-series and plot it:
+
+"""
+
+fig02 = viz.plot_tseries(
+ ts.TimeSeries(data=np.vstack([et[0].data, et[1].data]),
+ sampling_rate=et[0].sampling_rate, time_unit='ms'))
+
+ax = fig02.get_axes()[0]
+xlim = ax.get_xlim()
+ax.plot([xlim[0], xlim[1]], [means[0], means[0]], 'b--')
+ax.plot([xlim[0], xlim[1]], [means[1], means[1]], 'g--')
+
+
+"""
+
+.. image:: fig/grasshopper_02.png
+
+
+plt.show() is called in order to display the figures
+
+"""
+
+plt.show()
+
+"""
+
+The data used in this example is also available on the `CRCNS data sharing
+web-site <http://crcns.org/>`_.
+
+
+.. [Rokem2006] Ariel Rokem, Sebastian Watzl, Tim Gollisch, Martin Stemmler,
+ Andreas V M Herz and Ines Samengo (2006). Spike-timing precision
+ underlies the coding efficiency of auditory receptor neurons. J
+ Neurophysiol, 95:2541--52
+
+"""
diff --git a/doc/examples/mtm_baseband_power.py b/doc/examples/mtm_baseband_power.py
new file mode 100644
index 0000000..4fd1525
--- /dev/null
+++ b/doc/examples/mtm_baseband_power.py
@@ -0,0 +1,178 @@
+"""
+
+.. _multi-taper-baseband-power:
+
+===========================================
+Multitaper method for baseband demodulation
+===========================================
+
+Another application of the Slepian functions is to estimate the
+complex demodulate of a narrowband signal. This signal is normally of
+interest in neuroimaging when finding the lowpass power envelope and the
+instantaneous phase. The traditional technique uses the Hilbert
+transform to find the analytic signal. However, this approach suffers
+problems of bias and reliability, much like the periodogram suffers in
+PSD estimation. Once again, a multi-taper approach can provide an
+estimate with lower variance.
+
+The following demonstrates the use of spectra of multiple windows to
+compute a power envelope of a signal in a desired band.
+
+"""
+
+import numpy as np
+import scipy.signal as signal
+import nitime.algorithms as nt_alg
+import nitime.utils as nt_ut
+import matplotlib.pyplot as pp
+
+"""
+We'll set up a test signal with a red spectrum (integrated Gaussian
+noise).
+"""
+
+N = 10000
+nfft = np.power( 2, int(np.ceil(np.log2(N))) )
+NW = 40
+W = float(NW)/N
+
+"""
+Create a nearly lowpass band-limited signal.
+"""
+
+s = np.cumsum( np.random.randn(N) )
+
+"""
+Strictly enforce the band-limited property in this signal.
+"""
+
+(b, a) = signal.butter(3, W, btype='lowpass')
+slp = signal.lfilter(b, a, s)
+
+"""
+Modulate both signals away from baseband.
+"""
+
+s_mod = s * np.cos(2*np.pi*np.arange(N) * float(200) / N)
+slp_mod = slp * np.cos(2*np.pi*np.arange(N) * float(200) / N)
+fm = int( np.round(float(200) * nfft / N) )
+
+"""
+Create Slepians with the desired bandpass resolution (2W).
+"""
+
+(dpss, eigs) = nt_alg.dpss_windows(N, NW, 2*NW)
+keep = eigs > 0.9
+dpss = dpss[keep]; eigs = eigs[keep]
+
+"""
+
+Test 1
+------
+
+We'll compare multitaper baseband power estimation with the regular
+Hilbert transform method under truly narrowband conditions.
+"""
+
+# MT method
+xk = nt_alg.tapered_spectra(slp_mod, dpss, NFFT=nfft)
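+# Take each taper's complex coefficient at the modulation bin fm, weight it by the
+# square root of that taper's eigenvalue, and project back onto the tapers in time
+# (summing over tapers) to reconstruct the complex demodulate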
+mtm_bband = np.sum( 2 * (xk[:,fm] * np.sqrt(eigs))[:,None] * dpss, axis=0 )
+
+# Hilbert transform method
+hb_bband = signal.hilbert(slp_mod, N=nfft)[:N]
+
+pp.figure()
+pp.subplot(211)
+pp.plot(slp_mod, 'g')
+pp.plot(np.abs(mtm_bband), color='b', linewidth=3)
+pp.title('Multitaper Baseband Power')
+
+pp.subplot(212)
+pp.plot(slp_mod, 'g')
+pp.plot(np.abs(hb_bband), color='b', linewidth=3)
+pp.title('Hilbert Baseband Power')
+pp.gcf().tight_layout()
+
+"""
+
+.. image:: fig/mtm_baseband_power_01.png
+
+We see in the narrowband signal case that there's not much difference
+between taking the Hilbert transform and calculating the multitaper
+complex demodulate.
+
+"""
+
+"""
+
+Test 2
+------
+
+Now we'll compare multitaper baseband power estimation with the regular
+Hilbert transform method under more realistic non-narrowband
+conditions.
+
+"""
+
+# MT method
+xk = nt_alg.tapered_spectra(s_mod, dpss, NFFT=nfft)
+w, n = nt_ut.adaptive_weights(xk, eigs, sides='onesided')
+mtm_bband = np.sum( 2 * (xk[:,fm] * np.sqrt(eigs))[:,None] * dpss, axis=0 )
+
+# Hilbert transform method
+hb_bband = signal.hilbert(s_mod, N=nfft)[:N]
+
+pp.figure()
+pp.subplot(211)
+pp.plot(s_mod, 'g')
+pp.plot(np.abs(mtm_bband), color='b', linewidth=3)
+pp.title('Multitaper Baseband Power')
+
+pp.subplot(212)
+pp.plot(s_mod, 'g')
+pp.plot(np.abs(hb_bband), color='b', linewidth=3)
+pp.title('Hilbert Baseband Power')
+pp.gcf().tight_layout()
+
+"""
+
+.. image:: fig/mtm_baseband_power_02.png
+
+Here we see that since the underlying signal is not truly narrowband,
+the broadband bias is corrupting the Hilbert transform estimation of
+the complex demodulate. However, the multi-taper estimate clearly
+remains lowpass.
+
+"""
+
+"""
+Another property of computing the complex demodulate from the spectra
+of multiple windows is that it can be evaluated at any frequency band. In the
+examples above, we only took a slice at the modulation frequency that we set
+up. In practice, we might be interested in bandpasses centered at various
+frequencies. Note here, though, that our
+bandwidth is set by the Slepian sequences we used for analysis. The
+following plot shows a family of complex demodulates at frequencies
+near the modulation frequency.
+"""
+
+### Show a family of baseband demodulations from the multitaper method
+#eigen_coefs = 2 * (xk[:,(fm-100):(fm+100):10] * np.sqrt(eigs)[:,None])
+eigen_coefs = 2 * (xk[:,(fm-100):(fm+100):10] * \
+ np.sqrt(w[:,(fm-100):(fm+100):10]))
+mtm_fbband = np.sum( eigen_coefs[:,:,None] * dpss[:,None,:], axis=0 )
+
+pp.figure()
+pp.plot(s_mod, 'g')
+pp.plot(np.abs(mtm_fbband).T, linestyle='--', linewidth=2)
+pp.plot(np.abs(mtm_bband), color='b', linewidth=3)
+pp.title('Multitaper Baseband Power: Demodulation Freqs in (fm-100, fm+100)')
+pp.gcf().tight_layout()
+
+"""
+
+.. image:: fig/mtm_baseband_power_03.png
+
+"""
+
+pp.show()
diff --git a/doc/examples/mtm_harmonic_test.py b/doc/examples/mtm_harmonic_test.py
new file mode 100644
index 0000000..cd5a42d
--- /dev/null
+++ b/doc/examples/mtm_harmonic_test.py
@@ -0,0 +1,144 @@
+"""
+
+.. _multi-taper-harmonic-test:
+
+
+=========================================
+Multitaper F-test for harmonic components
+=========================================
+
+The Slepian sequences of the multitaper spectral estimation method can
+also be used to perform a hypothesis test regarding the presence of a
+pure sinusoid at any analyzed frequency. The F-test is used to assess
+whether the power at a given frequency can be attributed to a single
+line component. In this case, the power would be given by the
+summed spectral convolutions of the Slepian frequency functions with
+the line power spectrum, which is a Dirac delta. The complex Fourier
+coefficient of the putative sinusoid is estimated through a linear
+regression of the Slepian DC components, and the strength of the
+regression coefficient is tested against the residual spectral power
+for the F-test.
+
+The following demonstrates the use of the harmonic test.
+
+"""
+
+import numpy as np
+import nitime.algorithms as nt_alg
+import nitime.utils as nt_ut
+import matplotlib.pyplot as pp
+
+"""
+We will set up a test signal with 3 harmonic components within
+Gaussian noise. The line components must be sufficiently resolved
+given the multi-taper bandwidth of 2NW.
+"""
+
+N = 10000
+fft_pow = int( np.ceil(np.log2(N) + 2) )
+NW = 4
+lines = np.sort(np.random.randint(100, 2**(fft_pow-6), size=(3,)))
+while np.any( np.diff(lines) < 2*NW ):
+ lines = np.sort(np.random.randint(2**(fft_pow-6), size=(3,)))
+lines = lines.astype('d')
+
+"""
+The harmonic test should find the *exact* frequencies if they were to fall
+on the FFT grid. (Try commenting out the following line to see.) In the
+scenario of real sampled data, increasing the number of FFT points can
+help to locate the line components.
+"""
+
+lines += np.random.randn(3) # displace from grid locations
+
+"""
+Now proceed to specify the frequencies, phases, and amplitudes.
+"""
+
+lines /= 2.0**(fft_pow-2)  # convert to normalized frequencies (cycles per sample), well below Nyquist
+
+phs = np.random.rand(3) * 2 * np.pi
+amps = np.sqrt(2)/2 + np.abs( np.random.randn(3) )
+
+"""
+Set the RMS noise power here. Strategies to detect harmonics in low
+SNR include improving the reliability of the spectral estimate
+(increasing NW) and/or increasing the number of FFT points. Note that
+the former option will limit the ability to resolve lines at nearby
+frequencies.
+"""
+
+nz_sig = 1
+
+tx = np.arange(N)
+harmonics = amps[:,None]*np.cos( 2*np.pi*tx*lines[:,None] + phs[:,None] )
+harmonic = np.sum(harmonics, axis=0)
+nz = np.random.randn(N) * nz_sig
+sig = harmonic + nz
+
+"""
+Take a look at our mock signal.
+"""
+
+pp.figure()
+pp.subplot(211)
+pp.plot(harmonics.T)
+pp.xlim(*(np.array([0.2, 0.3])*N).astype('i'))
+pp.title('Sinusoid components')
+pp.subplot(212)
+pp.plot(harmonic, color='k', linewidth=3)
+pp.plot(sig, color=(.6, .6, .6), linewidth=2, linestyle='--')
+#pp.xlim(2000, 3000)
+pp.xlim(*(np.array([0.2, 0.3])*N).astype('i'))
+pp.title('Signal in noise')
+pp.gcf().tight_layout()
+
+"""
+
+.. image:: fig/mtm_harmonic_test_01.png
+
+"""
+
+"""
+Here we'll use the :func:`utils.detect_lines` function with the given
+Slepian properties (NW), and we'll ensure that we limit spectral bias
+by choosing Slepians with concentration factors greater than 0.9. The
+arrays returned include the detected line frequencies (f) and their
+complex coefficients (b). The frequencies are normalized to the interval :math:`(0,\frac{1}{2})`.
+"""
+
+f, b = nt_ut.detect_lines(sig, (NW, 2*NW), low_bias=True, NFFT=2**fft_pow)
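+# Reconstruct each detected line in the time domain as 2 * Re{ b * exp(2*pi*i*f*t) }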
+h_est = 2*(b[:,None]*np.exp(2j*np.pi*tx*f[:,None])).real
+
+pp.figure()
+pp.subplot(211)
+pp.plot(harmonics.T, 'c', linewidth=3)
+pp.plot(h_est.T, 'r--', linewidth=2)
+pp.title('%d lines detected'%h_est.shape[0])
+pp.xlim(*(np.array([0.2, 0.3])*N).astype('i'))
+pp.subplot(212)
+err = harmonic - np.sum(h_est, axis=0)
+pp.plot( err**2 )
+pp.title('Error signal')
+pp.show()
+
+"""
+
+.. image:: fig/mtm_harmonic_test_02.png
+
+We can see the quality (or not) of our estimated lines. A breakdown of
+the errors in the various estimated quantities is printed below.
+
+"""
+
+phs_est = np.angle(b)
+phs_est[phs_est < 0] += 2*np.pi
+
+phs_err = np.linalg.norm(phs_est - phs)**2
+amp_err = np.linalg.norm(amps - 2*np.abs(b))**2 / np.linalg.norm(amps)**2
+freq_err = np.linalg.norm(lines - f)**2
+
+print 'freqs:', lines, '\testimated:', f, '\terr: %1.3e'%freq_err
+print 'amp:', amps, '\testimated:', 2*np.abs(b), '\terr: %1.3e'%amp_err
+print 'phase:', phs, '\testimated:', phs_est, '\terr: %1.3e'%phs_err
+print 'MS error over noise: %1.3e'%(np.mean(err**2)/nz_sig**2,)
diff --git a/doc/examples/multi_taper_coh.py b/doc/examples/multi_taper_coh.py
new file mode 100755
index 0000000..dabf8cf
--- /dev/null
+++ b/doc/examples/multi_taper_coh.py
@@ -0,0 +1,352 @@
+"""
+
+.. _multi-taper-coh:
+
+
+================================
+Multi-taper coherence estimation
+================================
+
+
+Coherence estimation can be done using windowed-spectra. This is the method
+used in the example :ref:`resting-state`. In addition, multi-taper spectral
+estimation can be used in order to calculate coherence and also confidence
+intervals for the coherence values that result (see :ref:`multi-taper-psd`).
+
+
+The data analyzed here is an fMRI data-set contributed by Beth Mormino. The
+data is taken from a single subject in a "resting-state" scan, in which subjects
+are fixating on a cross and maintaining alert wakefulness, but not performing
+any other behavioral task.
+
+We start by importing modules/functions we will use in this example and define
+variables which will be used as the sampling interval of the TimeSeries
+objects and as upper and lower bounds on the frequency range analyzed:
+
+"""
+
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.mlab import csv2rec
+import scipy.stats.distributions as dist
+from scipy import fftpack
+
+import nitime
+from nitime.timeseries import TimeSeries
+from nitime import utils
+import nitime.algorithms as alg
+import nitime.viz
+from nitime.viz import drawmatrix_channels
+from nitime.analysis import CoherenceAnalyzer, MTCoherenceAnalyzer
+
+TR = 1.89
+f_ub = 0.15
+f_lb = 0.02
+
+"""
+
+We read the data into a recarray from a csv file:
+
+"""
+
+data_path = os.path.join(nitime.__path__[0], 'data')
+
+data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+
+
+"""
+
+The first line in the file contains the names of the different brain regions
+(or ROI = regions of interest) from which the time-series were derived. We
+extract the data into a regular array, while keeping the names to be used later:
+
+"""
+
+roi_names = np.array(data_rec.dtype.names)
+nseq = len(roi_names)
+n_samples = data_rec.shape[0]
+data = np.zeros((nseq, n_samples))
+
+for n_idx, roi in enumerate(roi_names):
+ data[n_idx] = data_rec[roi]
+
+
+"""
+
+We normalize the data in each of the ROIs to be in units of % change:
+
+"""
+
+pdata = utils.percent_change(data)
+
+"""
+
+We start by performing the detailed analysis, but note that a significant
+short-cut is presented below, so if you just want to know how to do this
+(without needing to understand the details), skip on down.
+
+We start by defining how many tapers will be used and calculate the values of
+the tapers and the associated eigenvalues of each taper:
+
+"""
+
+NW = 4
+K = 2 * NW - 1
+
+tapers, eigs = alg.dpss_windows(n_samples, NW, K)
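+# tapers has shape (K, n_samples); eigs holds the K corresponding concentration eigenvalues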
+
+"""
+
+We multiply the data by the tapers and derive the Fourier transform and the
+squared magnitude of the spectra (the power) for each tapered time-series:
+
+"""
+
+
+tdata = tapers[None, :, :] * pdata[:, None, :]
+tspectra = fftpack.fft(tdata)
+## mag_sqr_spectra = np.abs(tspectra)
+## np.power(mag_sqr_spectra, 2, mag_sqr_spectra)
+
+
+"""
+
+Coherence for real sequences is symmetric, so we calculate this for only half
+the spectrum (the other half is equal):
+
+"""
+
+L = n_samples / 2 + 1
+sides = 'onesided'
+
+"""
+
+We estimate adaptive weighting of the tapers, based on the data (see
+:ref:`multi-taper-psd` for an explanation and references):
+
+"""
+
+w = np.empty((nseq, K, L))
+for i in xrange(nseq):
+ w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)
+
+
+"""
+
+We proceed to calculate the coherence. We initialize empty data containers:
+
+"""
+
+csd_mat = np.zeros((nseq, nseq, L), 'D')
+psd_mat = np.zeros((2, nseq, nseq, L), 'd')
+coh_mat = np.zeros((nseq, nseq, L), 'd')
+coh_var = np.zeros_like(coh_mat)
+
+
+"""
+
+Looping over the ROIs:
+
+"""
+
+for i in xrange(nseq):
+ for j in xrange(i):
+
+ """
+
+ We calculate the multi-tapered cross spectrum between each two
+ time-series:
+
+ """
+
+ sxy = alg.mtm_cross_spectrum(
+ tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided'
+ )
+
+ """
+
+ And the individual PSD for each:
+
+ """
+
+ sxx = alg.mtm_cross_spectrum(
+ tspectra[i], tspectra[i], w[i], sides='onesided'
+ )
+ syy = alg.mtm_cross_spectrum(
+ tspectra[j], tspectra[j], w[j], sides='onesided'
+ )
+
+ psd_mat[0, i, j] = sxx
+ psd_mat[1, i, j] = syy
+
+ """
+
+ Coherence is : $Coh_{xy}(\lambda) = \frac{|{f_{xy}(\lambda)}|^2}{f_{xx}(\lambda) \cdot f_{yy}(\lambda)}$
+
+ """
+
+ coh_mat[i, j] = np.abs(sxy) ** 2
+ coh_mat[i, j] /= (sxx * syy)
+ csd_mat[i, j] = sxy
+
+ """
+
+ The variance from the different samples is calculated using a jack-knife
+ approach:
+
+ """
+
+ if i != j:
+ coh_var[i, j] = utils.jackknifed_coh_variance(
+ tspectra[i], tspectra[j], eigs, adaptive=True,
+ )
+
+
+"""
+
+This measure is normalized, based on the number of tapers:
+
+"""
+
+coh_mat_xform = utils.normalize_coherence(coh_mat, 2 * K - 2)
+
+
+"""
+
+We calculate 95% confidence intervals based on the jack-knife variance
+calculation:
+
+"""
+
+t025_limit = coh_mat_xform + dist.t.ppf(.025, K - 1) * np.sqrt(coh_var)
+t975_limit = coh_mat_xform + dist.t.ppf(.975, K - 1) * np.sqrt(coh_var)
+
+
+utils.normal_coherence_to_unit(t025_limit, 2 * K - 2, t025_limit)
+utils.normal_coherence_to_unit(t975_limit, 2 * K - 2, t975_limit)
+
+if L < n_samples:
+ freqs = np.linspace(0, 1 / (2 * TR), L)
+else:
+ freqs = np.linspace(0, 1 / TR, L, endpoint=False)
+
+
+"""
+
+We look only at frequencies between 0.02 and 0.15 (the physiologically
+relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency):
+
+"""
+
+freq_idx = np.where((freqs > f_lb) * (freqs < f_ub))[0]
+
+"""
+
+We extract the coherence and average over all these frequency bands:
+
+"""
+
+coh = np.mean(coh_mat[:, :, freq_idx], -1) # Averaging on the last dimension
+
+
+"""
+
+The next line calls the visualization routine which displays the data:
+
+"""
+
+
+fig01 = drawmatrix_channels(coh,
+ roi_names,
+ size=[10., 10.],
+ color_anchor=0,
+ title='MTM Coherence')
+
+
+"""
+
+.. image:: fig/multi_taper_coh_01.png
+
+Next we perform the same analysis, using the nitime object oriented interface.
+
+We start by initializing a TimeSeries object with this data and with the
+sampling_interval provided above. We set the metadata 'roi' field with the ROI
+names.
+
+
+"""
+
+T = TimeSeries(pdata, sampling_interval=TR)
+T.metadata['roi'] = roi_names
+
+
+"""
+
+We initialize an MTCoherenceAnalyzer object with the TimeSeries object
+
+"""
+
+C2 = MTCoherenceAnalyzer(T)
+
+"""
+
+The relevant indices in the Analyzer object are derived:
+
+"""
+
+freq_idx = np.where((C2.frequencies > 0.02) * (C2.frequencies < 0.15))[0]
+
+
+"""
+The call to C2.coherence triggers the computation; in the same line, the result
+is averaged over the frequency range of interest and then displayed:
+
+"""
+
+coh = np.mean(C2.coherence[:, :, freq_idx], -1) # Averaging on the last dimension
+fig02 = drawmatrix_channels(coh,
+ roi_names,
+ size=[10., 10.],
+ color_anchor=0,
+ title='MTCoherenceAnalyzer')
+
+
+"""
+
+.. image:: fig/multi_taper_coh_02.png
+
+
+For comparison, we also perform the analysis using the standard
+CoherenceAnalyzer object, which does the analysis using Welch's windowed
+periodogram, instead of the multi-taper spectral estimation method (see
+:ref:`resting-state` for a more thorough analysis of this data using this
+method):
+
+"""
+
+C3 = CoherenceAnalyzer(T)
+
+freq_idx = np.where((C3.frequencies > f_lb) * (C3.frequencies < f_ub))[0]
+
+#Extract the coherence and average across these frequency bands:
+coh = np.mean(C3.coherence[:, :, freq_idx], -1) # Averaging on the last dimension
+fig03 = drawmatrix_channels(coh,
+ roi_names,
+ size=[10., 10.],
+ color_anchor=0,
+ title='CoherenceAnalyzer')
+
+
+"""
+
+.. image:: fig/multi_taper_coh_03.png
+
+
+plt.show() is called in order to display the figures:
+
+
+"""
+
+plt.show()
diff --git a/doc/examples/multi_taper_spectral_estimation.py b/doc/examples/multi_taper_spectral_estimation.py
new file mode 100644
index 0000000..0935381
--- /dev/null
+++ b/doc/examples/multi_taper_spectral_estimation.py
@@ -0,0 +1,416 @@
+"""
+
+.. _multi-taper-psd:
+
+===============================
+Multi-taper spectral estimation
+===============================
+
+The distribution of power in a signal, as a function of frequency, known as the
+power spectrum (or PSD, for power spectral density) can be estimated using
+variants of the discrete Fourier transform (DFT). The naive estimate of the
+power spectrum, based on the values of the DFT estimated directly from the
+signal, using the fast Fourier transform algorithm (FFT) is referred to as a
+periodogram (see :func:`algorithms.periodogram`). This estimate suffers from
+several problems [NR2007]_:
+
+- Inefficiency: In most estimation problems, additional samples, or a denser
+ sampling grid would usually lead to a better estimate (smaller variance of
+ the estimate, given a constant level of noise). However, this is not the case
+ for the periodogram. Even as we add more samples to our signal, or increase
+ our sampling rate, our estimate at frequency $f_k$ does not improve. This is
+ because of the effects these kinds of changes have on spectral
+ estimates. Adding additional samples will improve the frequency domain
+ resolution of our estimate and sampling at a finer rate will change the
+ Nyquist frequency, the highest frequency for which the spectrum can be
+ estimated. Thus, these changes do not improve the estimate at frequency
+ $f_k$.
+
+The inefficiency problem can be solved by treating different parts of the
+signal as different samples from the same distribution, while assuming
+stationarity of the signal. In this method, a sliding window is applied to
+different parts of the signal and the windowed spectrum is averaged from these
+different samples. This is sometimes referred to as Welch's periodogram
+[Welch1967]_ and it is the default method used in
+:func:`algorithms.get_spectra` (with the hanning window as the window function
+used and no overlap between the windows). However, it may lead to the
+following problem:
+
+- Spectral leakage and bias: Spectral leakage refers to the fact that the
+ estimate of the spectrum at any given frequency bin is contaminated with the
+ power from other frequency bands. This is a consequence of the fact that we
+always look at a time-limited signal. In the naive periodogram estimate all
+ the samples within the time-limited signal are taken as they are (implicitly
+ multiplied by 1) and all the samples outside of this time-limited signal are
+ not taken at all (implicitly multiplied by 0). This is akin to what would
+ happen if the signal were multiplied sample-by-sample with a 'boxcar' window,
+ so called because the shape of this window is square, going from 0 to 1 over
+ one sampling window. Multiplying the signal with a boxcar window in the
+ time-domain is equivalent (due to the convolution theorem) to convolving it
+ in the frequency domain with the spectrum of the boxcar window. The spectral
+ leakage induced by this operation is demonstrated in the following example.
+
+
+We start by importing the modules/functions we will need in this example
+
+
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.signal as sig
+import scipy.stats.distributions as dist
+
+import nitime.algorithms as tsa
+import nitime.utils as utils
+from nitime.viz import winspect
+from nitime.viz import plot_spectral_estimate
+
+"""
+For demonstration, we will use a window of 128 points:
+"""
+
+npts = 128
+
+fig01 = plt.figure()
+
+# Boxcar with zeroed out fraction
+b = sig.boxcar(npts)
+zfrac = 0.15
+zi = int(npts * zfrac)
+b[:zi] = b[-zi:] = 0
+name = 'Boxcar - zero fraction=%.2f' % zfrac
+winspect(b, fig01, name)
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_01.png
+
+The figure on the left shows a boxcar window and the figure on the right
+shows the spectrum of the boxcar function (in dB units, relative to the
+frequency band of interest).
+
+These two problems can together be mitigated through the use of other
+windows. Other windows have been designed in order to optimize the amount of
+spectral leakage and limit it to certain parts of the spectrum. The following
+example demonstrates the spectral leakage for several different windows
+(including the boxcar):
+"""
+
+fig02 = plt.figure()
+
+# Boxcar with zeroed out fraction
+b = sig.boxcar(npts)
+zfrac = 0.15
+zi = int(npts * zfrac)
+b[:zi] = b[-zi:] = 0
+name = 'Boxcar - zero fraction=%.2f' % zfrac
+winspect(b, fig02, name)
+
+winspect(sig.hanning(npts), fig02, 'Hanning')
+winspect(sig.bartlett(npts), fig02, 'Bartlett')
+winspect(sig.barthann(npts), fig02, 'Modified Bartlett-Hann')
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_02.png
+
+As before, the left figure displays the windowing function in the temporal
+domain and the figure on the right displays the attenuation of spectral leakage
+in the other frequency bands in the spectrum. Notice that though different
+windowing functions have different spectral attenuation profiles, trading off
+attenuation of leakage from frequency bands near the frequency of interest
+(narrow-band leakage) with leakage from faraway frequency bands (broad-band
+leakage), they are all superior in both of these respects to the boxcar window
+used in the naive periodogram.
+
+Another approach which deals with both the inefficiency problem and with the
+spectral leakage problem is the use of taper functions. In this approach, the
+entire signal is multiplied by a time-varying function. Several of these
+functions may be used in order to emphasize and de-emphasize different parts of
+the signal and these can be constructed to be orthogonal to each other,
+constructing maximally independent samples at the length of the signal. As we
+will see below, this allows for statistical estimation of the distribution of
+the spectrum.
+
+Discrete prolate spheroidal sequences (DPSS, also known as Slepian sequences)
+[Slepian1978]_ are a class of taper functions which are constructed as a
+solution to the problem of concentrating the spectrum to within a pre-specified
+bandwidth. These tapers can be constructed using
+:func:`algorithms.dpss_windows`, but for the purpose of spectral estimation, it
+is sufficient to specify the bandwidth (which defines the boundary between
+narrow-band and broad-band leakage) as an input to
+:func:`algorithms.multi_taper_psd` and this function will then construct the
+appropriate windows, calculate the tapered spectra and average them.
+
+We will demonstrate the use of DPSS in spectral estimation on a time-series
+with known spectral properties generated from an auto-regressive process.
+
+We start by defining a function which will be used throughout this example:
+
+"""
+
+
+def dB(x, out=None):
+ if out is None:
+ return 10 * np.log10(x)
+ else:
+ np.log10(x, out)
+ np.multiply(out, 10, out)
+
+
+"""
+
+And the conversion factor from ln to dB:
+
+"""
+
+ln2db = dB(np.e)
+
+
+"""
+
+Next, we generate a sequence with known spectral properties:
+
+"""
+
+N = 512
+ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)
+ar_seq -= ar_seq.mean()
+
+"""
+
+This is the true PSD for this sequence:
+
+"""
+
+fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=N)
+psd = (hz * hz.conj()).real
+
+"""
+
+This is a one-sided spectrum, so we double the power:
+
+"""
+
+psd *= 2
+dB(psd, psd)
+
+
+"""
+
+We begin by using the naive periodogram function (:func:`tsa.periodogram`) in
+order to calculate the PSD and compare that to the true PSD calculated above.
+
+
+"""
+
+freqs, d_psd = tsa.periodogram(ar_seq)
+dB(d_psd, d_psd)
+
+fig03 = plot_spectral_estimate(freqs, psd, (d_psd,), elabels=("Periodogram",))
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_03.png
+
+
+Next, we use Welch's periodogram, by applying :func:`tsa.get_spectra`. Note
+that we explicitly provide the function with a 'method' dict, which specifies
+the method used in order to calculate the PSD, but the default method is 'welch'.
+
+
+"""
+
+welch_freqs, welch_psd = tsa.get_spectra(ar_seq,
+ method=dict(this_method='welch', NFFT=N))
+welch_freqs *= (np.pi / welch_freqs.max())
+welch_psd = welch_psd.squeeze()
+dB(welch_psd, welch_psd)
+
+fig04 = plot_spectral_estimate(freqs, psd, (welch_psd,), elabels=("Welch",))
+
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_04.png
+
+
+Next, we use the multi-taper estimation method. We estimate the spectrum:
+
+"""
+
+f, psd_mt, nu = tsa.multi_taper_psd(
+ ar_seq, adaptive=False, jackknife=False
+ )
+dB(psd_mt, psd_mt)
+
+
+"""
+
+And get the number of tapers from here:
+
+"""
+
+Kmax = nu[0] / 2
+
+
+"""
+
+We calculate a Chi-squared model 95% confidence interval with 2*Kmax degrees of
+freedom (see [Percival1993]_, eq. 258):
+
+
+"""
+
+p975 = dist.chi2.ppf(.975, 2 * Kmax)
+p025 = dist.chi2.ppf(.025, 2 * Kmax)
+
+l1 = ln2db * np.log(2 * Kmax / p975)
+l2 = ln2db * np.log(2 * Kmax / p025)
+
+hyp_limits = (psd_mt + l1, psd_mt + l2)
+
+fig05 = plot_spectral_estimate(freqs, psd, (psd_mt,), hyp_limits,
+ elabels=(r"MT with $\chi^{2}$ 95% interval",))
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_05.png
+
+
+An iterative method ([Thomson2007]_) can be used in order to adaptively set the
+weighting of the different tapers, according to the actual spectral
+concentration in the given signal (and not only the theoretical spectral
+concentration calculated by default).
+
+"""
+
+f, adaptive_psd_mt, nu = tsa.multi_taper_psd(
+ ar_seq, adaptive=True, jackknife=False
+ )
+dB(adaptive_psd_mt, adaptive_psd_mt)
+
+p975 = dist.chi2.ppf(.975, nu)
+p025 = dist.chi2.ppf(.025, nu)
+
+l1 = ln2db * np.log(nu / p975)
+l2 = ln2db * np.log(nu / p025)
+
+hyp_limits = (adaptive_psd_mt + l1, adaptive_psd_mt + l2)
+
+fig06 = plot_spectral_estimate(freqs, psd, (adaptive_psd_mt,), hyp_limits,
+ elabels=('MT with adaptive weighting and 95% interval',))
+
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_06.png
+
+As mentioned above, in addition to estimating the spectrum itself, an estimate
+of the confidence interval of the spectrum can be generated using a
+jack-knifing procedure [Thomson2007]_.
+
+Let us define the following:
+
+| **simple sample estimate**
+| :math:`\hat{\theta} = \dfrac{1}{n}\sum_i Y_i`
+
+This is the parameter estimate averaged from all the samples in the
+distribution (all the tapered spectra).
+
+| **leave-one-out measurement**
+| :math:`\hat{\theta}_{-i} = \dfrac{1}{n-1}\sum_{k \neq i}Y_k`
+
+This defines a group of estimates, where each sample is based on leaving one
+measurement (one tapered spectrum) out.
+
+| **pseudovalues**
+| :math:`\hat{\theta}_i = n\hat{\theta} - (n-1)\hat{\theta}_{-i}`
+
+The jackknifed estimator is computed as:
+
+:math:`\tilde{\theta} = \dfrac{1}{n}\sum_i \hat{\theta}_i = n\hat{\theta} - \dfrac{n-1}{n}\sum_i \hat{\theta}_{-i}`
+
+This estimator is known [Thomson2007]_ to be distributed about the true
+parameter :math:`\theta` approximately as a Student's t distribution, with
+standard error defined as:
+
+:math:`s^{2} = \dfrac{n-1}{n}\sum_i \left(\hat{\theta}_i - \tilde{\theta}\right)^{2}`
+
+And degrees of freedom which depend on the number of tapers used (Kmax-1).
+
+"""
+
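+"""
+
+Before letting nitime carry out this computation, here is a small stand-alone
+sketch of the pseudovalue recipe defined above (the sketch is ours and purely
+illustrative, not part of nitime; the toy variables are not used elsewhere).
+We apply it to the simplest possible estimator, the sample mean:
+
+"""
+
+toy_samples = np.random.randn(8)                      # n toy measurements Y_i
+n_toy = len(toy_samples)
+toy_theta_hat = toy_samples.mean()                    # simple sample estimate
+toy_theta_loo = (toy_samples.sum() - toy_samples) / (n_toy - 1)  # leave-one-out estimates
+toy_pseudo = n_toy * toy_theta_hat - (n_toy - 1) * toy_theta_loo  # pseudovalues
+toy_theta_tilde = toy_pseudo.mean()                   # jackknifed estimator
+# For the sample mean the pseudovalues reduce to the original samples, so the
+# jackknifed estimate coincides with the plain mean.
+
+"""
+
+With these definitions in mind, we let :func:`tsa.multi_taper_psd` do the real
+work on the tapered spectra:
+
+"""
+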
+_, _, jk_var = tsa.multi_taper_psd(ar_seq, adaptive=False, jackknife=True)
+
+jk_p = (dist.t.ppf(.975, Kmax - 1) * np.sqrt(jk_var)) * ln2db
+
+jk_limits = (psd_mt - jk_p, psd_mt + jk_p)
+
+
+fig07 = plot_spectral_estimate(freqs, psd, (psd_mt,),
+ jk_limits,
+ elabels=('MT with JK 95% interval',))
+
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_07.png
+
+In addition, if the 'adaptive' flag is set to True, an iterative adaptive
+method is used in order to correct bias in the spectrum.
+
+Finally, we combine the adaptive estimation of the weights with the
+jack-knifing procedure.
+
+"""
+
+
+_, _, adaptive_jk_var = tsa.multi_taper_psd(
+ ar_seq, adaptive=True, jackknife=True
+ )
+
+# find 95% confidence limits from inverse of t-dist CDF
+jk_p = (dist.t.ppf(.975, Kmax - 1) * np.sqrt(adaptive_jk_var)) * ln2db
+
+adaptive_jk_limits = (adaptive_psd_mt - jk_p, adaptive_psd_mt + jk_p)
+
+fig08 = plot_spectral_estimate(freqs, psd, (adaptive_psd_mt,),
+ adaptive_jk_limits,
+ elabels=('adaptive-MT with JK 95% interval',))
+
+
+"""
+
+.. image:: fig/multi_taper_spectral_estimation_08.png
+
+We call plt.show() in order to show all the figures:
+
+"""
+
+plt.show()
+
+"""
+
+References
+
+.. [NR2007] W.H. Press, S.A. Teukolsky, W.T Vetterling and B.P. Flannery (2007)
+ Numerical Recipes: The Art of Scientific Computing. Cambridge:
+ Cambridge University Press. 3rd Ed.
+
+.. [Thomson2007] D.J. Thomson, Jackknifing Multitaper Spectrum Estimates, IEEE
+ Signal Processing Magazine, 2007, pp. 20-30.
+
+.. [Welch1967] P.D. Welch (1967), The use of the fast Fourier transform for the
+   estimation of power spectra: a method based on time averaging
+   over short modified periodograms. IEEE Transactions on Audio and
+   Electroacoustics.
+
+.. [Slepian1978] Slepian, D. Prolate spheroidal wave functions, Fourier
+   analysis, and uncertainty V: The discrete case. Bell System
+   Technical Journal, Volume 57 (1978), 1371-1430.
+
+.. [Percival1993] Percival D.B. and Walden A.T. (1993) Spectral Analysis for
+ Physical Applications: Multitaper and Conventional Univariate
+ Techniques. Cambridge University Press
+
+"""
diff --git a/doc/examples/note_about_examples.txt b/doc/examples/note_about_examples.txt
new file mode 100644
index 0000000..d69b953
--- /dev/null
+++ b/doc/examples/note_about_examples.txt
@@ -0,0 +1,18 @@
+**A note about the examples**
+
+The examples here are some uses of the analysis and visualization functionality
+of nitime, with example data from actual neuroscience experiments, or with
+synthetic data, which is generated as part of the example.
+
+All the examples presented in the documentation are generated from *fully
+functioning* python scripts, which are available as part of the source
+distribution in the doc/examples folder.
+
+If you want to replicate a particular analysis or visualization, simply copy
+the relevant ".py" script from the source distribution, or download the script
+using the link at the bottom of each page.
+
+Thanks to the developers of PyMVPA_ for designing the software which enables us
+to provide these documented examples!
+
+.. include:: ../links_names.txt
diff --git a/doc/examples/resting_state_fmri.py b/doc/examples/resting_state_fmri.py
new file mode 100644
index 0000000..acf29ea
--- /dev/null
+++ b/doc/examples/resting_state_fmri.py
@@ -0,0 +1,366 @@
+"""
+
+.. _resting-state:
+
+===============================
+Coherency analysis of fMRI data
+===============================
+
+The fMRI data-set analyzed in the following examples was contributed by Beth
+Mormino. The data is taken from a single subject in a "resting-state" scan, in
+which subjects are fixating on a cross and maintaining alert wakefulness, but
+not performing any other behavioral task.
+
+The data was pre-processed and time-series of BOLD responses were extracted
+from different regions of interest (ROIs) in the brain. The data is organized
+in csv file, where each column corresponds to an ROI and each row corresponds
+to a sampling point.
+
+In the following, we will demonstrate some simple time-series analysis and
+visualization techniques which can be applied to this kind of data.
+
+
+We start by importing the necessary modules/functions, defining the
+sampling_interval of the data (TR, or repetition time) and the frequency band
+of interest:
+
+"""
+
+import os
+
+#Import from other libraries:
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.mlab import csv2rec
+
+import nitime
+#Import the time-series objects:
+from nitime.timeseries import TimeSeries
+#Import the analysis objects:
+from nitime.analysis import CorrelationAnalyzer, CoherenceAnalyzer
+#Import utility functions:
+from nitime.utils import percent_change
+from nitime.viz import drawmatrix_channels, drawgraph_channels, plot_xcorr
+
+#This information (the sampling interval) has to be known in advance:
+TR = 1.89
+f_lb = 0.02
+f_ub = 0.15
+
+"""
+
+We use csv2rec to read the data in from file to a recarray:
+
+"""
+
+data_path = os.path.join(nitime.__path__[0], 'data')
+
+data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+
+"""
+This data structure contains in its dtype a field 'names', which contains the
+first row in each column. In this case, that is the labels of the ROIs from
+which the data in each column was extracted. The data from the recarray is
+extracted into a 'standard' array and, for each ROI, it is normalized to
+percent signal change, using the utils.percent_change function.
+
+"""
+
+#Extract information:
+roi_names = np.array(data_rec.dtype.names)
+n_samples = data_rec.shape[0]
+
+
+#Make an empty container for the data
+data = np.zeros((len(roi_names), n_samples))
+
+for n_idx, roi in enumerate(roi_names):
+ data[n_idx] = data_rec[roi]
+
+#Normalize the data:
+data = percent_change(data)
+
+
+"""
+
+We initialize a TimeSeries object from the normalized data:
+
+"""
+
+T = TimeSeries(data, sampling_interval=TR)
+T.metadata['roi'] = roi_names
+
+
+"""
+
+First, we examine the correlations between the time-series extracted from
+different parts of the brain. The following script computes the correlations
+and displays the correlation matrix (using the drawmatrix_channels function),
+with the ROIs labeled.
+
+"""
+
+#Initialize the correlation analyzer
+C = CorrelationAnalyzer(T)
+
+#Display the correlation matrix
+fig01 = drawmatrix_channels(C.corrcoef, roi_names, size=[10., 10.], color_anchor=0)
+
+
+"""
+
+.. image:: fig/resting_state_fmri_01.png
+
+Notice that setting the color_anchor input to this function to 0 makes sure
+that the center of the color map (here a blue => white => red) is at 0. In this
+case, positive values will be displayed as red and negative values in blue.
+
+We notice that the left caudate nucleus (labeled 'lcau') has an interesting
+pattern of correlations. It has a high correlation with both the left putamen
+('lput', which is located nearby) and also with the right caudate nucleus
+('lcau'), which is the homologous region in the other hemisphere. Are these two
+correlation values related to each other? The right caudate and left putamen
+seem to have a moderately low correlation value. One way to examine this
+question is by looking at the temporal structure of the cross-correlation
+functions. In order to do that, from the CorrelationAnalyzer object, we extract
+the normalized cross-correlation function. This results in another TimeSeries
+object, which contains the full time-series of the cross-correlation between
+any combination of time-series from the different channels in the time-series
+object. We can pass the resulting object, together with a list of indices to
+the viz.plot_xcorr function, which visualizes the chosen combinations of
+series:
+
+"""
+
+xc = C.xcorr_norm
+
+idx_lcau = np.where(roi_names == 'lcau')[0]
+idx_rcau = np.where(roi_names == 'rcau')[0]
+idx_lput = np.where(roi_names == 'lput')[0]
+idx_rput = np.where(roi_names == 'rput')[0]
+
+fig02 = plot_xcorr(xc,
+ ((idx_lcau, idx_rcau),
+ (idx_lcau, idx_lput)),
+ line_labels=['rcau', 'lput'])
+
+
+"""
+
+.. image:: fig/resting_state_fmri_02.png
+
+
+Note that the correlation is normalized, so that the value of the
+cross-correlation functions at the zero-lag point (time = 0 sec) is equal to
+the Pearson correlation between the two time-series. We observe that there are
+correlations larger than the zero-lag correlation occurring at other
+time-points preceding and following the zero-lag. This could arise because of a
+more complex interplay of activity between two areas, which is not captured by
+the correlation and can also arise because of differences in the
+characteristics of the HRF in the two ROIs. One method of analysis which can
+mitigate these issues is analysis of coherency between time-series
+[Sun2005]_. This analysis computes an equivalent of the correlation in the
+frequency domain:
+
+.. math::
+
+ R_{xy} (\lambda) = \frac{f_{xy}(\lambda)}
+ {\sqrt{f_{xx} (\lambda) \cdot f_{yy}(\lambda)}}
+
+Because this is a complex number, this computation results in two
+quantities. First, the magnitude of this number, also referred to as
+"coherence":
+
+.. math::
+
+ Coh_{xy}(\lambda) = |{R_{xy}(\lambda)}|^2 =
+ \frac{|{f_{xy}(\lambda)}|^2}{f_{xx}(\lambda) \cdot f_{yy}(\lambda)}
+
+This is a measure of the pairwise coupling between the two time-series. It can
+vary between 0 and 1, with 0 being complete independence and 1 being complete
+coupling. A time-series would have a coherence of 1 with itself, but not only:
+since this measure is independent of the relative phase of the two time-series,
+the coherence between a time-series and any phase-shifted version of itself
+will also be equal to 1.
+
+However, the relative phase is another quantity which can be derived from this
+computation:
+
+.. math::
+
+ \phi(\lambda) = arg [R_{xy} (\lambda)] = arg [f_{xy} (\lambda)]
+
+
+This value can be used in order to infer which area is leading and which area
+is lagging (according to the sign of the relative phase) and can be used to
+compute the temporal delay between activity in one ROI and the other.
+
+First, let's look at the pair-wise coherence between all our ROIs. This can be
+done by creating a CoherenceAnalyzer object.
+
+"""
+
+C = CoherenceAnalyzer(T)
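+
+"""
+
+As an aside, here is a minimal sketch (ours, not used further in this example)
+of how the relative phase defined above maps onto a temporal delay at a given
+frequency; the numbers are purely hypothetical:
+
+"""
+
+phi_example = np.pi / 4     # a relative phase of pi/4 radians ...
+f_example = 0.1             # ... at a frequency of 0.1 Hz ...
+delay_example = phi_example / (2 * np.pi * f_example)  # ... corresponds to a 1.25 s delay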
+
+"""
+
+Once this object is initialized with the TimeSeries object, the mid-frequency
+of the frequency bands represented in the spectral decomposition of the
+time-series can be accessed in the 'frequencies' attribute of the object. The
+spectral resolution of this representation is the same one used in the
+computation of the coherence.
+
+Since the fMRI BOLD data contains data in frequencies which are not
+physiologically relevant (presumably due to machine noise and fluctuations in
+physiological measures unrelated to neural activity), we focus our analysis on
+a band of frequencies between 0.02 and 0.15 Hz. This is easily achieved by
+determining the values of the indices in :attr:`C.frequencies` and using those
+indices in accessing the data in :attr:`C.coherence`. The coherence is then
+averaged across all these frequency bands.
+
+"""
+
+freq_idx = np.where((C.frequencies > f_lb) * (C.frequencies < f_ub))[0]
+
+"""
+The C.coherence attribute is an ndarray of dimensions $n_{ROI}$ by $n_{ROI}$ by
+$n_{frequencies}$.
+
+We extract the coherence in that frequency band, average across the frequency
+bands of interest and pass that to the visualization function:
+
+"""
+
+
+coh = np.mean(C.coherence[:, :, freq_idx], -1) # Averaging on the last dimension
+fig03 = drawmatrix_channels(coh, roi_names, size=[10., 10.], color_anchor=0)
+
+"""
+
+.. image:: fig/resting_state_fmri_03.png
+
+We can also focus in on the ROIs we were interested in. This requires a little
+bit more manipulation of the indices into the coherence matrix:
+
+"""
+
+idx = np.hstack([idx_lcau, idx_rcau, idx_lput, idx_rput])
+idx1 = np.vstack([[idx[i]] * 4 for i in range(4)]).ravel()
+idx2 = np.hstack(4 * [idx])
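+# idx1 and idx2 together enumerate all 16 (row, column) index pairs of the four
+# chosen ROIs, so the fancy indexing below pulls out a 4x4 array of coherence spectra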
+
+coh = C.coherence[idx1, idx2].reshape(4, 4, C.frequencies.shape[0])
+
+"""
+
+Extract the coherence and average across the same frequency bands as before:
+
+"""
+
+
+coh = np.mean(coh[:, :, freq_idx], -1) # Averaging on the last dimension
+
+"""
+
+Finally, in this case, we visualize the adjacency matrix, by creating a network
+graph of these ROIs (this is done by using the function drawgraph_channels
+which relies on `networkx <http://networkx.lanl.gov>`_):
+
+"""
+
+fig04 = drawgraph_channels(coh, roi_names[idx])
+
+"""
+
+.. image:: fig/resting_state_fmri_04.png
+
+This shows us that there is a stronger connectivity between the left putamen and
+the left caudate than between the homologous regions in the other
+hemisphere. In particular, in contrast to the relatively high correlation
+between the right caudate and the left caudate, there is a rather low coherence
+between the time-series in these two regions, in this frequency range.
+
+Note that the connectivity described by coherency (and other measures of
+functional connectivity) could arise because of neural connectivity between the
+two regions, but also due to a common blood supply, or common fluctuations in
+other physiological measures which affect the BOLD signal measured in both
+regions. In order to be able to differentiate these two options, we would have
+to conduct a comparison between two different behavioral states that affect the
+neural activity in the two regions, without affecting these common
+physiological factors, such as common blood supply (for an in-depth discussion
+of these issues, see [Silver2010]_). In this case, we will simply assume that
+the connectivity matrix presented represents the actual neural connectivity
+between these two brain regions.
+
+We notice that there is indeed a stronger coherence between left putamen and the
+left caudate than between the left caudate and the right caudate. Next, we
+might ask whether the moderate coherence between the left putamen and the right
+caudate can be accounted for by the coherence these two time-series share with
+the time-series derived from the left caudate. This kind of question can be
+answered using an analysis of partial coherency. For the time series $x$ and
+$y$, the partial coherence, given a third time-series $r$, is defined as:
+
+.. math::
+
+ Coh_{xy|r} = \frac{|{R_{xy}(\lambda) - R_{xr}(\lambda)
+ R_{ry}(\lambda)}|^2}{(1-|{R_{xr}}|^2)(1-|{R_{ry}}|^2)}
+
+
+In this case, we extract the partial coherence between the three regions,
+excluding common effects of the left caudate. In order to do that, we generate
+the partial-coherence attribute of the :class:`CoherenceAnalyzer` object, while
+indexing on the additional dimension which this object had (the coherence
+between time-series $x$ and time-series $y$, *given* time series $r$):
+
+"""
+
+
+idx3 = np.hstack(16 * [idx_lcau])
+coh = C.coherence_partial[idx1, idx2, idx3].reshape(4, 4, C.frequencies.shape[0])
+coh = np.mean(coh[:, :, freq_idx], -1)
+
+"""
+
+
+Again, we visualize the result, using both the :func:`viz.drawgraph_channels`
+and the :func:`drawmatrix_channels` functions:
+
+
+"""
+
+fig05 = drawgraph_channels(coh, roi_names[idx])
+fig06 = drawmatrix_channels(coh, roi_names[idx], color_anchor=0)
+
+"""
+
+.. image:: fig/resting_state_fmri_05.png
+
+
+.. image:: fig/resting_state_fmri_06.png
+
+
+As can be seen, the resulting partial coherence between left putamen and right
+caudate, given the activity in the left caudate is smaller than the coherence
+between these two areas, suggesting that part of this coherence can be
+explained by their common connection to the left caudate.
+
+XXX Add description of calculation of temporal delay here.
+
+
+We call plt.show() in order to display the figures:
+
+"""
+
+plt.show()
+
+
+"""
+
+.. [Sun2005] F.T. Sun and L.M. Miller and M. D'Esposito(2005). Measuring
+ temporal dynamics of functional networks using phase spectrum of
+ fMRI data. Neuroimage, 28: 227-37.
+
+.. [Silver2010] M.A Silver, AN Landau, TZ Lauritzen, W Prinzmetal, LC
+ Robertson(2010) Isolating human brain functional connectivity associated
+ with a specific cognitive process, in Human Vision and Electronic Imaging
+ XV, edited by B.E. Rogowitz and T.N. Pappas, Proceedings of SPIE, Volume
+ 7527, pp. 75270B-1 to 75270B-9
+"""
diff --git a/doc/examples/seed_analysis.py b/doc/examples/seed_analysis.py
new file mode 100644
index 0000000..9404e11
--- /dev/null
+++ b/doc/examples/seed_analysis.py
@@ -0,0 +1,273 @@
+"""
+
+=========================================
+Seed correlation/coherence with fMRI data
+=========================================
+
+
+Seed-based analysis is the analysis of a bivariate measure (such as correlation
+or coherence) between one time-series (termed the 'seed') and many other
+time-series (termed the 'targets'). This is a rather typical strategy in the
+analysis of fMRI data where one might look for all the areas of the brain that
+exhibit high level of connectivity to a particular region of interest.
+
+
+We start by importing the needed modules. First modules from the standard lib
+and from 3rd parties:
+
+"""
+
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+"""
+
+Notice that nibabel (http://nipy.org/nibabel) is required in order to run this
+example, so we test whether the user has that installed and throw an
+informative error if not:
+
+"""
+
+try:
+ from nibabel import load
+except ImportError:
+    raise ImportError('You need nibabel (http://nipy.org/nibabel/) in order to run this example')
+
+"""
+
+The following are nitime modules:
+
+"""
+
+import nitime
+import nitime.analysis as nta
+import nitime.fmri.io as io
+
+"""
+
+We define the TR of the analysis and the frequency band of interest:
+
+"""
+
+TR = 1.35
+f_lb = 0.02
+f_ub = 0.15
+
+
+"""
+
+An fMRI data file with some actual fMRI data is shipped as part of the
+distribution, the following line will find the path to this data on the
+specific setup:
+
+"""
+
+data_path = test_dir_path = os.path.join(nitime.__path__[0], 'data')
+
+fmri_file = os.path.join(data_path, 'fmri1.nii.gz')
+
+
+"""
+
+Read in the data, using nibabel:
+
+"""
+
+fmri_data = load(fmri_file)
+
+
+"""
+Notice that 'fmri_data' is not an array, but rather a nibabel image
+object. Nibabel cleverly delays the actual allocation of memory and reading
+from file as long as possible. In this case, we only want information that is
+available through the header of the nifti file, namely the dimensions of the
+data.
+
+We extract only the spatial dimensions of the data, excluding the last
+dimension which is the time-dimension and generate a coords list:
+
+"""
+
+volume_shape = fmri_data.shape[:-1]
+
+coords = list(np.ndindex(volume_shape))
+
+
+"""
+
+We choose some number of random voxels to serve as seed voxels:
+
+"""
+
+n_seeds = 3
+
+# Choose n_seeds random voxels to be the seed voxels
+seeds = np.random.randint(0, len(coords), n_seeds)
+coords_seeds = np.array(coords)[seeds].T
+
+
+"""
+
+The entire volume is chosen to be the target:
+
+"""
+
+coords_target = np.array(coords).T
+
+
+"""
+
+We use nitime.fmri.io in order to generate TimeSeries objects from spatial
+coordinates in the data file:
+
+"""
+
+# Make the seed time series:
+time_series_seed = io.time_series_from_file(fmri_file,
+ coords_seeds,
+ TR=TR,
+ normalize='percent',
+ filter=dict(lb=f_lb,
+ ub=f_ub,
+ method='boxcar'))
+
+# Make the target time series:
+time_series_target = io.time_series_from_file(fmri_file,
+ coords_target,
+ TR=TR,
+ normalize='percent',
+ filter=dict(lb=f_lb,
+ ub=f_ub,
+ method='boxcar'))
+
+
+"""
+
+The SeedCoherenceAnalyzer receives as input both of these TimeSeries and
+calculates the coherence of each of the channels in the seed TimeSeries to
+*all* the channels in the target TimeSeries. Here we initialize it with these
+and with a method dict, which specifies the parameters of the spectral analysis
+used for the coherence estimation:
+
+"""
+
+A = nta.SeedCoherenceAnalyzer(time_series_seed, time_series_target,
+ method=dict(NFFT=20))
+
+
+"""
+
+Similarly, the SeedCorrelationAnalyzer receives as input seed and target
+time-series:
+
+"""
+
+B = nta.SeedCorrelationAnalyzer(time_series_seed, time_series_target)
+
+"""
+
+For the coherence, we are only interested in the physiologically relevant
+frequency band:
+
+"""
+
+freq_idx = np.where((A.frequencies > f_lb) * (A.frequencies < f_ub))[0]
+
+
+"""
+
+The results in both analyzer objects are arrays of dimensions: (number of seeds
+x number of targets). For the coherence, there is an additional last dimension
+of: number of frequency bands, which we will average over. For the
+visualization, we extract the coherence and correlation values for each one of
+the seeds separately:
+
+"""
+
+cor = []
+coh = []
+
+for this_seed in range(n_seeds):
+ # Extract the coherence and average across these frequency bands:
+ coh.append(np.mean(A.coherence[this_seed][:, freq_idx], -1)) # Averaging on the
+ # last dimension
+
+ cor.append(B.corrcoef[this_seed]) # No need to do any additional
+ # computation
+
+
+"""
+
+We then put the coherence/correlation values back into arrays that have the
+original shape of the volume from which the data was extracted:
+
+"""
+
+#For numpy fancy indexing into volume arrays:
+coords_indices = list(coords_target)
+
+vol_coh = []
+vol_cor = []
+for this_vol in range(n_seeds):
+ vol_coh.append(np.empty(volume_shape))
+ vol_coh[-1][coords_indices] = coh[this_vol]
+ vol_cor.append(np.empty(volume_shape))
+ vol_cor[-1][coords_indices] = cor[this_vol]
+
+
+"""
+
+We visualize this by choosing a random slice from the data:
+
+"""
+
+#Choose a random slice to display:
+random_slice = np.random.randint(0, volume_shape[-1], 1)
+
+
+"""
+
+We display the coherence and correlation values for each seed voxel in this slice:
+
+"""
+
+fig01 = plt.figure()
+fig02 = plt.figure()
+ax_coh = []
+ax_cor = []
+for this_vox in range(n_seeds):
+ ax_coh.append(fig01.add_subplot(1, n_seeds, this_vox + 1))
+ ax_coh[-1].matshow(vol_coh[this_vox][:, :, random_slice].squeeze())
+ ax_coh[-1].set_title('Seed coords: %s' % coords_seeds[:, this_vox])
+
+ ax_cor.append(fig02.add_subplot(1, n_seeds, this_vox + 1))
+ ax_cor[-1].matshow(vol_cor[this_vox][:, :, random_slice].squeeze())
+ ax_cor[-1].set_title('Seed coords: %s' % coords_seeds[:, this_vox])
+
+for x in zip(['Coherence', 'Correlation'], [fig01, fig02]):
+ suptit = '%s between all the voxels in slice: ' % x[0]
+ suptit += '%i and seed voxels' % random_slice
+ x[1].suptitle(suptit)
+
+
+"""
+
+We can now compare the results in the coherence:
+
+
+.. image:: fig/seed_analysis_01.png
+
+
+And the correlation:
+
+.. image:: fig/seed_analysis_02.png
+
+
+We call plt.show() in order to display the figure:
+
+"""
+
+plt.show()
diff --git a/doc/examples/snr_example.py b/doc/examples/snr_example.py
new file mode 100644
index 0000000..1a2c2a5
--- /dev/null
+++ b/doc/examples/snr_example.py
@@ -0,0 +1,185 @@
+"""
+==============================================
+Calculation of signal to noise and information
+==============================================
+
+This method is based on ideas described in [Borst1999]_ (Figure 2) and
+[Hsu2004]_. The calculation can be used, for example, in order to estimate the
+channel capacity of a neuron responding to a repeated stimulus.
+
+The estimate of the information is based on the formula
+
+
+.. math::
+
+   I(S,R) = \int_{0}^{Nyquist} \log_2 (1 + SNR(\omega)) \, d\omega
+
+
+Where $SNR(\omega)$ is the ratio of the signal power and the noise power at the
+frequency band centered on $\omega$. This equation holds true for a Gaussian
+channel and is an upper bound for all other cases.
+
+The signal power is estimated as the power of the mean response to repeated
+presentations of the same signal, and the noise power is calculated as the
+average power of each trial's deviation from this mean response.
+
+We import the necessary modules:
+
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+import nitime.utils as utils
+import nitime.timeseries as ts
+import nitime.viz as viz
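+
+"""
+
+As a quick numerical illustration of the information integral above (this is
+our own toy calculation, not part of the analysis below; all names here are
+hypothetical), the integral can be approximated for a made-up SNR profile:
+
+"""
+
+freqs_toy = np.linspace(0, 0.5, 256)         # toy frequency grid up to a 0.5 Hz "Nyquist"
+snr_toy = 10.0 * np.exp(-freqs_toy / 0.1)    # a made-up SNR(omega), largest at low frequencies
+info_toy = np.trapz(np.log2(1 + snr_toy), freqs_toy)  # upper bound on I(S,R), in bits/s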
+
+"""
+
+For this example, we generate an auto-regressive sequence to be the signal:
+
+"""
+
+ar_seq, nz, alpha = utils.ar_generator(N=128, drop_transients=10)
+ar_seq -= ar_seq.mean()
+
+"""
+
+The signal will be repeated several times, adding noise to the signal in each
+repetition:
+
+"""
+
+n_trials = 12
+
+fig_snr = []
+sample = []
+fig_tseries = []
+
+"""
+
+We add different levels of noise to the ar_seq variable, in order to
+demonstrate the effect of added noise on the signal to noise ratio, as well as
+on the calculated information.
+
+"""
+
+for idx, noise in enumerate([1, 10, 50, 100]):
+
+ """
+
+ Make n_trials repetitions of the signal:
+
+ """
+
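+    # Replicate ar_seq across n_trials rows; the np.ones term also adds a constant offset of 1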
+ sample.append(np.ones((n_trials, ar_seq.shape[-1])) + ar_seq)
+ n_points = sample[-1].shape[-1]
+
+ """
+
+ Add noise:
+
+ """
+
+ for trial in xrange(n_trials):
+ sample[-1][trial] += np.random.randn(sample[-1][trial].shape[0]) * noise
+
+ """
+
+ This is the estimate of the signal:
+
+ """
+
+ sample_mean = np.mean(sample[-1], 0)
+
+ """
+
+    We plot a comparison of the actual signal (blue) and this estimate (red). The
+    thinner lines, in other colors, represent the individual trials:
+
+ """
+
+ fig_tseries.append(plt.figure())
+ ax = fig_tseries[-1].add_subplot(1, 1, 1)
+ ax.plot(sample[-1].T)
+ ax.plot(ar_seq, 'b', linewidth=4)
+ ax.plot(sample_mean, 'r', linewidth=4)
+ ax.set_xlabel('Time')
+ ax.set_ylabel('Amplitude')
+
+ """
+
+ We present this at different levels of noise. With low noise, the estimate
+ of the signal and also the response of the system at different repetitions
+ is very similar to the original signal.
+
+ """
+
+ tseries = ts.TimeSeries(sample[-1], sampling_rate=1.)
+ fig_snr.append(viz.plot_snr(tseries))
+
+"""
+
+.. image:: fig/snr_example_01.png
+
+A special visualization function :func:`viz.plot_snr` is used in order to
+display the signal power (blue) and the noise power (green), both in the
+left sub-plot. In addition, the SNR (blue) and the cumulative information
+(as a function of frequency bands, starting from low frequencies, in red)
+are displayed in the right subplot.
+
+
+.. image:: fig/snr_example_02.png
+
+With more added noise, the estimate of the signal deviates further from the
+signal.
+
+.. image:: fig/snr_example_03.png
+
+The signal power remains rather similar, but the noise power increases
+(across all bands). As a consequence, the signal to noise ratio decreases and the
+accumulated information decreases.
+
+.. image:: fig/snr_example_04.png
+
+This becomes even more apparent with more noise:
+
+.. image:: fig/snr_example_05.png
+
+
+.. image:: fig/snr_example_06.png
+
+
+Until, with the largest amplitude of noise, the signal power is almost completely
+overwhelmed with noise:
+
+.. image:: fig/snr_example_07.png
+
+.. image:: fig/snr_example_08.png
+
+Finally, we use :func:`plot_snr_diff` in order to compare information
+transmission (on the left) and the signal to noise ratio (on the right) between
+the two last noise levels:
+"""
+
+ts1 = ts.TimeSeries(sample[-1], sampling_rate=1.)
+ts2 = ts.TimeSeries(sample[-2], sampling_rate=1.)
+fig_compare = viz.plot_snr_diff(ts1, ts2)
+plt.show()
+
+"""
+
+.. image:: fig/snr_example_09.png
+
+
+References
+
+ .. [Hsu2004] Hsu A, Borst A and Theunissen, FE (2004) Quantifying
+    variability in neural responses and its application for the validation of
+    model predictions. Network: Comput Neural Syst 15:91-109
+
+ .. [Borst1999] Borst A and Theunissen FE (1999) Information theory and
+ neural coding. Nat Neurosci 2:947-957
+
+
+"""
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..20ba3dd
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,25 @@
+===========
+NiTime Home
+===========
+
+:mod:`Nitime` is a library for time-series analysis of data from neuroscience
+experiments.
+
+It contains a core of numerical algorithms for time-series analysis both in the
+time and spectral domains, a set of container objects to represent time-series,
+and auxiliary objects that expose a high level interface to the numerical
+machinery and make common analysis tasks easy to express with compact and
+semantically clear code.
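+
+As a minimal, illustrative sketch of the flavor of this interface (it assumes
+only the ``TimeSeries`` container used throughout the examples; details may
+vary between versions)::
+
+  import numpy as np
+  import nitime.timeseries as ts
+
+  # Wrap a plain numpy array in a time-series container, sampled at 1 Hz;
+  # the sampling metadata then travels together with the data.
+  t1 = ts.TimeSeries(np.random.randn(100), sampling_rate=1.)
+  print(t1.sampling_rate)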
+
+.. toctree::
+ :maxdepth: 1
+
+ documentation
+ whatsnew/index
+ examples/index
+
+Questions? Comments? Please `email`_ us.
+
+To follow along with the development, and join in the fun, head over to our `github webpage <http://github.com/nipy/nitime/>`_.
+
+.. _email: http://projects.scipy.org/mailman/listinfo/nipy-devel
diff --git a/doc/links_names.txt b/doc/links_names.txt
new file mode 100644
index 0000000..880ee1f
--- /dev/null
+++ b/doc/links_names.txt
@@ -0,0 +1,110 @@
+.. This (-*- rst -*-) format file contains commonly used link targets
+ and name substitutions. It may be included in many files,
+ therefore it should only contain link targets and name
+ substitutions. Try grepping for "^\.\. _" to find plausible
+ candidates for this list.
+
+.. NOTE: reST targets are
+ __not_case_sensitive__, so only one target definition is needed for
+ nipy, NIPY, Nipy, etc...
+
+.. _nipy: http://nipy.sourceforge.net/nipy
+.. _`NIPY developer resources`: http://nipy.sourceforge.net/devel
+.. _`Brain Imaging Center`: http://bic.berkeley.edu/
+.. _`Nibabel`: http://nipy.org/nibabel
+
+.. _gh-download: http://github.com/nipy/nitime/downloads
+.. _gh-archive: http://github.com/nipy/nitime/archives/master
+.. _nitime-pypi: http://pypi.python.org/pypi/nitime
+
+.. Documentation tools
+.. _graphviz: http://www.graphviz.org/
+.. _Sphinx: http://sphinx.pocoo.org/
+.. _`Sphinx reST`: http://sphinx.pocoo.org/rest.html
+.. _reST: http://docutils.sourceforge.net/rst.html
+.. _docutils: http://docutils.sourceforge.net
+
+.. Licenses
+.. _GPL: http://www.gnu.org/licenses/gpl.html
+.. _BSD: http://www.opensource.org/licenses/bsd-license.php
+.. _LGPL: http://www.gnu.org/copyleft/lesser.html
+
+.. Working process
+.. _pynifti: http://niftilib.sourceforge.net/pynifti/
+.. _nifticlibs: http://nifti.nimh.nih.gov
+.. _nifti: http://nifti.nimh.nih.gov
+.. _`nipy sourceforge`: http://nipy.sourceforge.net/
+.. _sourceforge: http://nipy.sourceforge.net/
+.. _`nipy launchpad`: https://launchpad.net/nipy
+.. _launchpad: https://launchpad.net/
+.. _`nipy trunk`: https://code.launchpad.net/~nipy-developers/nipy/trunk
+.. _`nipy mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel
+.. _`nipy bugs`: https://bugs.launchpad.net/nipy
+
+.. Code support stuff
+.. _pychecker: http://pychecker.sourceforge.net/
+.. _pylint: http://www.logilab.org/project/pylint
+.. _pyflakes: http://divmod.org/trac/wiki/DivmodPyflakes
+.. _virtualenv: http://pypi.python.org/pypi/virtualenv
+.. _git: http://git.or.cz/
+.. _flymake: http://flymake.sourceforge.net/
+.. _rope: http://rope.sourceforge.net/
+.. _pymacs: http://pymacs.progiciels-bpi.ca/pymacs.html
+.. _ropemacs: http://rope.sourceforge.net/ropemacs.html
+.. _ECB: http://ecb.sourceforge.net/
+.. _emacs_python_mode: http://www.emacswiki.org/cgi-bin/wiki/PythonMode
+.. _doctest-mode: http://www.cis.upenn.edu/~edloper/projects/doctestmode/
+.. _bazaar: http://bazaar-vcs.org/
+.. _subversion: http://subversion.tigris.org/
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose
+.. _`python coverage tester`: http://nedbatchelder.com/code/modules/coverage.html
+.. _easy-install: http://packages.python.org/distribute/easy_install.html
+
+.. Other python projects
+.. _numpy: http://www.scipy.org/NumPy
+.. _scipy: http://www.scipy.org
+.. _ipython: http://ipython.scipy.org
+.. _`ipython manual`: http://ipython.scipy.org/doc/manual/html
+.. _matplotlib: http://matplotlib.sourceforge.net
+.. _ETS: http://code.enthought.com/projects/tool-suite.php
+.. _`Enthought Tool Suite`: http://code.enthought.com/projects/tool-suite.php
+.. _python: http://www.python.org
+.. _mayavi: http://mayavi.sourceforge.net/
+.. _sympy: http://code.google.com/p/sympy/
+.. _networkx: http://networkx.lanl.gov/
+
+.. Python imaging projects
+.. _PyMVPA: http://www.pymvpa.org
+.. _BrainVISA: http://brainvisa.info
+.. _anatomist: http://brainvisa.info
+
+.. Not so python imaging projects
+.. _matlab: http://www.mathworks.com
+.. _spm: http://www.fil.ion.ucl.ac.uk/spm
+.. _eeglab: http://sccn.ucsd.edu/eeglab
+.. _AFNI: http://afni.nimh.nih.gov/afni
+.. _FSL: http://www.fmrib.ox.ac.uk/fsl
+.. _FreeSurfer: http://surfer.nmr.mgh.harvard.edu
+.. _voxbo: http://www.voxbo.org
+
+.. General software
+.. _gcc: http://gcc.gnu.org
+.. _xcode: http://developer.apple.com/TOOLS/xcode
+.. _mingw: http://www.mingw.org
+.. _macports: http://www.macports.org/
+
+.. Functional imaging labs
+.. _`functional imaging laboratory`: http://www.fil.ion.ucl.ac.uk
+.. _FMRIB: http://www.fmrib.ox.ac.uk
+
+.. Other organizations
+.. _enthought: http://www.enthought.com
+.. _kitware: http://www.kitware.com
+
+.. General information links
+.. _`wikipedia FMRI`: http://en.wikipedia.org/wiki/Functional_magnetic_resonance_imaging
+.. _`wikipedia PET`: http://en.wikipedia.org/wiki/Positron_emission_tomography
+
+.. Mathematical methods
+.. _`wikipedia ICA`: http://en.wikipedia.org/wiki/Independent_component_analysis
+.. _`wikipedia PCA`: http://en.wikipedia.org/wiki/Principal_component_analysis
diff --git a/doc/news.rst b/doc/news.rst
new file mode 100644
index 0000000..6781322
--- /dev/null
+++ b/doc/news.rst
@@ -0,0 +1,15 @@
+=============
+ Nitime news
+=============
+
+June XX: Version 0.5 released.
+
+June 19 2012: Version 0.4 released.
+
+August 13 2011: Version 0.3 released.
+
+November 9 2010: Version 0.2 released.
+
+August 18 2010: Nitime version 0.1 is officially out.
+
+The Scipy 2009 conference paper is `available <_static/Scipy2009Nitime.pdf>`_.
diff --git a/doc/sphinxext/README.txt b/doc/sphinxext/README.txt
new file mode 100644
index 0000000..8e7e56a
--- /dev/null
+++ b/doc/sphinxext/README.txt
@@ -0,0 +1,24 @@
+===================
+ Sphinx Extensions
+===================
+
+We've copied these sphinx extensions over from nipy-core. Any edits
+should be done upstream in nipy-core, not here in nitime!
+
+These are a few sphinx extensions we are using to build the nipy
+documentation. In this file we list where they each come from, since we intend
+to always push back upstream any modifications or improvements we make to them.
+
+It's worth noting that some of these are being carried (as copies) by more
+than one project. Hopefully once they mature a little more, they will be
+incorporated back into sphinx itself, so that all projects can use a common
+base.
+
+* From numpy:
+ * docscrape.py
+ * docscrape_sphinx.py
+ * numpydoc.py
+
+* From matplotlib:
+ * ipython_console_highlighting.py
+ * only_directives.py
diff --git a/doc/sphinxext/docscrape.py b/doc/sphinxext/docscrape.py
new file mode 100644
index 0000000..3999cc9
--- /dev/null
+++ b/doc/sphinxext/docscrape.py
@@ -0,0 +1,497 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+ """A line-based string reader.
+
+ """
+ def __init__(self, data):
+ """
+ Parameters
+ ----------
+ data : str
+ String with lines separated by '\n'.
+
+ """
+ if isinstance(data,list):
+ self._str = data
+ else:
+ self._str = data.split('\n') # store string as list of lines
+
+ self.reset()
+
+ def __getitem__(self, n):
+ return self._str[n]
+
+ def reset(self):
+ self._l = 0 # current line nr
+
+ def read(self):
+ if not self.eof():
+ out = self[self._l]
+ self._l += 1
+ return out
+ else:
+ return ''
+
+ def seek_next_non_empty_line(self):
+ for l in self[self._l:]:
+ if l.strip():
+ break
+ else:
+ self._l += 1
+
+ def eof(self):
+ return self._l >= len(self._str)
+
+ def read_to_condition(self, condition_func):
+ start = self._l
+ for line in self[start:]:
+ if condition_func(line):
+ return self[start:self._l]
+ self._l += 1
+ if self.eof():
+ return self[start:self._l+1]
+ return []
+
+ def read_to_next_empty_line(self):
+ self.seek_next_non_empty_line()
+ def is_empty(line):
+ return not line.strip()
+ return self.read_to_condition(is_empty)
+
+ def read_to_next_unindented_line(self):
+ def is_unindented(line):
+ return (line.strip() and (len(line.lstrip()) == len(line)))
+ return self.read_to_condition(is_unindented)
+
+ def peek(self,n=0):
+ if self._l + n < len(self._str):
+ return self[self._l + n]
+ else:
+ return ''
+
+ def is_empty(self):
+ return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+ def __init__(self,docstring):
+ docstring = textwrap.dedent(docstring).split('\n')
+
+ self._doc = Reader(docstring)
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': [''],
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Returns': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'Other Parameters': [],
+ 'Attributes': [],
+ 'Methods': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'Warnings': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {}
+ }
+
+ self._parse()
+
+ def __getitem__(self,key):
+ return self._parsed_data[key]
+
+ def __setitem__(self,key,val):
+ if not self._parsed_data.has_key(key):
+ warn("Unknown section %s" % key)
+ else:
+ self._parsed_data[key] = val
+
+ def _is_at_section(self):
+ self._doc.seek_next_non_empty_line()
+
+ if self._doc.eof():
+ return False
+
+ l1 = self._doc.peek().strip() # e.g. Parameters
+
+ if l1.startswith('.. index::'):
+ return True
+
+ l2 = self._doc.peek(1).strip() # ---------- or ==========
+ return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+ def _strip(self,doc):
+ i = 0
+ j = 0
+ for i,line in enumerate(doc):
+ if line.strip(): break
+
+ for j,line in enumerate(doc[::-1]):
+ if line.strip(): break
+
+ return doc[i:len(doc)-j]
+
+ def _read_to_next_section(self):
+ section = self._doc.read_to_next_empty_line()
+
+ while not self._is_at_section() and not self._doc.eof():
+ if not self._doc.peek(-1).strip(): # previous line was empty
+ section += ['']
+
+ section += self._doc.read_to_next_empty_line()
+
+ return section
+
+ def _read_sections(self):
+ while not self._doc.eof():
+ data = self._read_to_next_section()
+ name = data[0].strip()
+
+ if name.startswith('..'): # index section
+ yield name, data[1:]
+ elif len(data) < 2:
+ yield StopIteration
+ else:
+ yield name, self._strip(data[2:])
+
+ def _parse_param_list(self,content):
+ r = Reader(content)
+ params = []
+ while not r.eof():
+ header = r.read().strip()
+ if ' : ' in header:
+ arg_name, arg_type = header.split(' : ')[:2]
+ else:
+ arg_name, arg_type = header, ''
+
+ desc = r.read_to_next_unindented_line()
+ desc = dedent_lines(desc)
+
+ params.append((arg_name,arg_type,desc))
+
+ return params
+
+
+ _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+ r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+ def _parse_see_also(self, content):
+ """
+ func_name : Descriptive text
+ continued text
+ another_func_name : Descriptive text
+ func_name1, func_name2, :meth:`func_name`, func_name3
+
+ """
+ items = []
+
+ def parse_item_name(text):
+ """Match ':role:`name`' or 'name'"""
+ m = self._name_rgx.match(text)
+ if m:
+ g = m.groups()
+ if g[1] is None:
+ return g[3], None
+ else:
+ return g[2], g[1]
+ raise ValueError("%s is not an item name" % text)
+
+ def push_item(name, rest):
+ if not name:
+ return
+ name, role = parse_item_name(name)
+ items.append((name, list(rest), role))
+ del rest[:]
+
+ current_func = None
+ rest = []
+
+ for line in content:
+ if not line.strip(): continue
+
+ m = self._name_rgx.match(line)
+ if m and line[m.end():].strip().startswith(':'):
+ push_item(current_func, rest)
+ current_func, line = line[:m.end()], line[m.end():]
+ rest = [line.split(':', 1)[1].strip()]
+ if not rest[0]:
+ rest = []
+ elif not line.startswith(' '):
+ push_item(current_func, rest)
+ current_func = None
+ if ',' in line:
+ for func in line.split(','):
+ push_item(func, [])
+ elif line.strip():
+ current_func = line
+ elif current_func is not None:
+ rest.append(line.strip())
+ push_item(current_func, rest)
+ return items
+
+ def _parse_index(self, section, content):
+ """
+ .. index: default
+ :refguide: something, else, and more
+
+ """
+ def strip_each_in(lst):
+ return [s.strip() for s in lst]
+
+ out = {}
+ section = section.split('::')
+ if len(section) > 1:
+ out['default'] = strip_each_in(section[1].split(','))[0]
+ for line in content:
+ line = line.split(':')
+ if len(line) > 2:
+ out[line[1]] = strip_each_in(line[2].split(','))
+ return out
+
+ def _parse_summary(self):
+ """Grab signature (if given) and summary"""
+ if self._is_at_section():
+ return
+
+ summary = self._doc.read_to_next_empty_line()
+ summary_str = " ".join([s.strip() for s in summary]).strip()
+ if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+ self['Signature'] = summary_str
+ if not self._is_at_section():
+ self['Summary'] = self._doc.read_to_next_empty_line()
+ else:
+ self['Summary'] = summary
+
+ if not self._is_at_section():
+ self['Extended Summary'] = self._read_to_next_section()
+
+ def _parse(self):
+ self._doc.reset()
+ self._parse_summary()
+
+ for (section,content) in self._read_sections():
+ if not section.startswith('..'):
+ section = ' '.join([s.capitalize() for s in section.split(' ')])
+ if section in ('Parameters', 'Attributes', 'Methods',
+ 'Returns', 'Raises', 'Warns'):
+ self[section] = self._parse_param_list(content)
+ elif section.startswith('.. index::'):
+ self['index'] = self._parse_index(section, content)
+ elif section == 'See Also':
+ self['See Also'] = self._parse_see_also(content)
+ else:
+ self[section] = content
+
+ # string conversion routines
+
+ def _str_header(self, name, symbol='-'):
+ return [name, len(name)*symbol]
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' '*indent + line]
+ return out
+
+ def _str_signature(self):
+ if self['Signature']:
+ return [self['Signature'].replace('*','\*')] + ['']
+ else:
+ return ['']
+
+ def _str_summary(self):
+ if self['Summary']:
+ return self['Summary'] + ['']
+ else:
+ return []
+
+ def _str_extended_summary(self):
+ if self['Extended Summary']:
+ return self['Extended Summary'] + ['']
+ else:
+ return []
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ for param,param_type,desc in self[name]:
+ out += ['%s : %s' % (param, param_type)]
+ out += self._str_indent(desc)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += self[name]
+ out += ['']
+ return out
+
+ def _str_see_also(self, func_role):
+ if not self['See Also']: return []
+ out = []
+ out += self._str_header("See Also")
+ last_had_desc = True
+ for func, desc, role in self['See Also']:
+ if role:
+ link = ':%s:`%s`' % (role, func)
+ elif func_role:
+ link = ':%s:`%s`' % (func_role, func)
+ else:
+ link = "`%s`_" % func
+ if desc or last_had_desc:
+ out += ['']
+ out += [link]
+ else:
+ out[-1] += ", %s" % link
+ if desc:
+ out += self._str_indent([' '.join(desc)])
+ last_had_desc = True
+ else:
+ last_had_desc = False
+ out += ['']
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.iteritems():
+ if section == 'default':
+ continue
+ out += [' :%s: %s' % (section, ', '.join(references))]
+ return out
+
+ def __str__(self, func_role=''):
+ out = []
+ out += self._str_signature()
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters','Returns','Raises'):
+ out += self._str_param_list(param_list)
+ out += self._str_section('Warnings')
+ out += self._str_see_also(func_role)
+ for s in ('Notes','References','Examples'):
+ out += self._str_section(s)
+ out += self._str_index()
+ return '\n'.join(out)
+
+
+def indent(str,indent=4):
+ indent_str = ' '*indent
+ if str is None:
+ return indent_str
+ lines = str.split('\n')
+ return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+ """Deindent a list of lines maximally"""
+ return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+ return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+ def __init__(self, func, role='func', doc=None):
+ self._f = func
+ self._role = role # e.g. "func" or "meth"
+ if doc is None:
+ doc = inspect.getdoc(func) or ''
+ try:
+ NumpyDocString.__init__(self, doc)
+ except ValueError, e:
+ print '*'*78
+ print "ERROR: '%s' while parsing `%s`" % (e, self._f)
+ print '*'*78
+ #print "Docstring follows:"
+ #print doclines
+ #print '='*78
+
+ if not self['Signature']:
+ func, func_name = self.get_func()
+ try:
+ # try to read signature
+ argspec = inspect.getargspec(func)
+ argspec = inspect.formatargspec(*argspec)
+ argspec = argspec.replace('*','\*')
+ signature = '%s%s' % (func_name, argspec)
+ except TypeError, e:
+ signature = '%s()' % func_name
+ self['Signature'] = signature
+
+ def get_func(self):
+ func_name = getattr(self._f, '__name__', self.__class__.__name__)
+ if inspect.isclass(self._f):
+ func = getattr(self._f, '__call__', self._f.__init__)
+ else:
+ func = self._f
+ return func, func_name
+
+ def __str__(self):
+ out = ''
+
+ func, func_name = self.get_func()
+ signature = self['Signature'].replace('*', '\*')
+
+ roles = {'func': 'function',
+ 'meth': 'method'}
+
+ if self._role:
+ if not roles.has_key(self._role):
+ print "Warning: invalid role %s" % self._role
+ out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
+ func_name)
+
+ out += super(FunctionDoc, self).__str__(func_role=self._role)
+ return out
+
+
+class ClassDoc(NumpyDocString):
+ def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None):
+ if not inspect.isclass(cls):
+ raise ValueError("Initialise using a class. Got %r" % cls)
+ self._cls = cls
+
+ if modulename and not modulename.endswith('.'):
+ modulename += '.'
+ self._mod = modulename
+ self._name = cls.__name__
+ self._func_doc = func_doc
+
+ if doc is None:
+ doc = pydoc.getdoc(cls)
+
+ NumpyDocString.__init__(self, doc)
+
+ @property
+ def methods(self):
+ return [name for name,func in inspect.getmembers(self._cls)
+ if not name.startswith('_') and callable(func)]
+
+ def __str__(self):
+ out = ''
+ out += super(ClassDoc, self).__str__()
+ out += "\n\n"
+
+ #for m in self.methods:
+ # print "Parsing `%s`" % m
+ # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
+ # out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
+
+ return out
+
+
diff --git a/doc/sphinxext/docscrape_sphinx.py b/doc/sphinxext/docscrape_sphinx.py
new file mode 100644
index 0000000..77ed271
--- /dev/null
+++ b/doc/sphinxext/docscrape_sphinx.py
@@ -0,0 +1,136 @@
+import re, inspect, textwrap, pydoc
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+ # string conversion routines
+ def _str_header(self, name, symbol='`'):
+ return ['.. rubric:: ' + name, '']
+
+ def _str_field_list(self, name):
+ return [':' + name + ':']
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' '*indent + line]
+ return out
+
+ def _str_signature(self):
+ return ['']
+ if self['Signature']:
+ return ['``%s``' % self['Signature']] + ['']
+ else:
+ return ['']
+
+ def _str_summary(self):
+ return self['Summary'] + ['']
+
+ def _str_extended_summary(self):
+ return self['Extended Summary'] + ['']
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_field_list(name)
+ out += ['']
+ for param,param_type,desc in self[name]:
+ out += self._str_indent(['**%s** : %s' % (param.strip(),
+ param_type)])
+ out += ['']
+ out += self._str_indent(desc,8)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += ['']
+ content = textwrap.dedent("\n".join(self[name])).split("\n")
+ out += content
+ out += ['']
+ return out
+
+ def _str_see_also(self, func_role):
+ out = []
+ if self['See Also']:
+ see_also = super(SphinxDocString, self)._str_see_also(func_role)
+ out = ['.. seealso::', '']
+ out += self._str_indent(see_also[2:])
+ return out
+
+ def _str_warnings(self):
+ out = []
+ if self['Warnings']:
+ out = ['.. warning::', '']
+ out += self._str_indent(self['Warnings'])
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ if len(idx) == 0:
+ return out
+
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.iteritems():
+ if section == 'default':
+ continue
+ elif section == 'refguide':
+ out += [' single: %s' % (', '.join(references))]
+ else:
+ out += [' %s: %s' % (section, ','.join(references))]
+ return out
+
+ def _str_references(self):
+ out = []
+ if self['References']:
+ out += self._str_header('References')
+ if isinstance(self['References'], str):
+ self['References'] = [self['References']]
+ out.extend(self['References'])
+ out += ['']
+ return out
+
+ def __str__(self, indent=0, func_role="obj"):
+ out = []
+ out += self._str_signature()
+ out += self._str_index() + ['']
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters', 'Attributes', 'Methods',
+ 'Returns','Raises'):
+ out += self._str_param_list(param_list)
+ out += self._str_warnings()
+ out += self._str_see_also(func_role)
+ out += self._str_section('Notes')
+ out += self._str_references()
+ out += self._str_section('Examples')
+ out = self._str_indent(out,indent)
+ return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+ pass
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+ pass
+
+def get_doc_object(obj, what=None, doc=None):
+ if what is None:
+ if inspect.isclass(obj):
+ what = 'class'
+ elif inspect.ismodule(obj):
+ what = 'module'
+ elif callable(obj):
+ what = 'function'
+ else:
+ what = 'object'
+ if what == 'class':
+ return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
+ elif what in ('function', 'method'):
+ return SphinxFunctionDoc(obj, '', doc=doc)
+ else:
+ if doc is None:
+ doc = pydoc.getdoc(obj)
+ return SphinxDocString(doc)
+
diff --git a/doc/sphinxext/github.py b/doc/sphinxext/github.py
new file mode 100644
index 0000000..d6215e6
--- /dev/null
+++ b/doc/sphinxext/github.py
@@ -0,0 +1,155 @@
+"""Define text roles for GitHub
+
+* ghissue - Issue
+* ghpull - Pull Request
+* ghuser - User
+
+Adapted from bitbucket example here:
+https://bitbucket.org/birkenfeld/sphinx-contrib/src/tip/bitbucket/sphinxcontrib/bitbucket.py
+
+Authors
+-------
+
+* Doug Hellmann
+* Min RK
+"""
+#
+# Original Copyright (c) 2010 Doug Hellmann. All rights reserved.
+#
+
+from docutils import nodes, utils
+from docutils.parsers.rst.roles import set_classes
+
+def make_link_node(rawtext, app, type, slug, options):
+ """Create a link to a github resource.
+
+ :param rawtext: Text being replaced with link node.
+ :param app: Sphinx application context
+ :param type: Link type (issues, changeset, etc.)
+ :param slug: ID of the thing to link to
+ :param options: Options dictionary passed to role func.
+ """
+
+ try:
+ base = app.config.github_project_url
+ if not base:
+ raise AttributeError
+ if not base.endswith('/'):
+ base += '/'
+ except AttributeError, err:
+ raise ValueError('github_project_url configuration value is not set (%s)' % str(err))
+
+ ref = base + type + '/' + slug + '/'
+ set_classes(options)
+ prefix = "#"
+ if type == 'pull':
+ prefix = "PR " + prefix
+ node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref,
+ **options)
+ return node
+
+def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+ """Link to a GitHub issue.
+
+ Returns 2 part tuple containing list of nodes to insert into the
+ document and a list of system messages. Both are allowed to be
+ empty.
+
+ :param name: The role name used in the document.
+ :param rawtext: The entire markup snippet, with role.
+ :param text: The text marked with the role.
+ :param lineno: The line number where rawtext appears in the input.
+ :param inliner: The inliner instance that called us.
+ :param options: Directive options for customization.
+ :param content: The directive content for customization.
+ """
+
+ try:
+ issue_num = int(text)
+ if issue_num <= 0:
+ raise ValueError
+ except ValueError:
+ msg = inliner.reporter.error(
+ 'GitHub issue number must be a number greater than or equal to 1; '
+ '"%s" is invalid.' % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ app = inliner.document.settings.env.app
+ #app.info('issue %r' % text)
+ if 'pull' in name.lower():
+ category = 'pull'
+ elif 'issue' in name.lower():
+ category = 'issues'
+ else:
+ msg = inliner.reporter.error(
+ 'GitHub roles include "ghpull" and "ghissue", '
+ '"%s" is invalid.' % name, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ node = make_link_node(rawtext, app, category, str(issue_num), options)
+ return [node], []
+
+def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+ """Link to a GitHub user.
+
+ Returns 2 part tuple containing list of nodes to insert into the
+ document and a list of system messages. Both are allowed to be
+ empty.
+
+ :param name: The role name used in the document.
+ :param rawtext: The entire markup snippet, with role.
+ :param text: The text marked with the role.
+ :param lineno: The line number where rawtext appears in the input.
+ :param inliner: The inliner instance that called us.
+ :param options: Directive options for customization.
+ :param content: The directive content for customization.
+ """
+ app = inliner.document.settings.env.app
+ #app.info('user link %r' % text)
+ ref = 'https://www.github.com/' + text
+ node = nodes.reference(rawtext, text, refuri=ref, **options)
+ return [node], []
+
+def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+ """Link to a GitHub commit.
+
+ Returns 2 part tuple containing list of nodes to insert into the
+ document and a list of system messages. Both are allowed to be
+ empty.
+
+ :param name: The role name used in the document.
+ :param rawtext: The entire markup snippet, with role.
+ :param text: The text marked with the role.
+ :param lineno: The line number where rawtext appears in the input.
+ :param inliner: The inliner instance that called us.
+ :param options: Directive options for customization.
+ :param content: The directive content for customization.
+ """
+ app = inliner.document.settings.env.app
+ #app.info('user link %r' % text)
+ try:
+ base = app.config.github_project_url
+ if not base:
+ raise AttributeError
+ if not base.endswith('/'):
+ base += '/'
+ except AttributeError, err:
+ raise ValueError('github_project_url configuration value is not set (%s)' % str(err))
+
+ ref = base + text
+ node = nodes.reference(rawtext, text[:6], refuri=ref, **options)
+ return [node], []
+
+
+def setup(app):
+ """Install the plugin.
+
+ :param app: Sphinx application context.
+ """
+ app.info('Initializing GitHub plugin')
+ app.add_role('ghissue', ghissue_role)
+ app.add_role('ghpull', ghissue_role)
+ app.add_role('ghuser', ghuser_role)
+ app.add_role('ghcommit', ghcommit_role)
+ app.add_config_value('github_project_url', None, 'env')
+ return
diff --git a/doc/sphinxext/ipython_console_highlighting.py b/doc/sphinxext/ipython_console_highlighting.py
new file mode 100644
index 0000000..00f9abd
--- /dev/null
+++ b/doc/sphinxext/ipython_console_highlighting.py
@@ -0,0 +1,98 @@
+"""reST directive for syntax-highlighting ipython interactive sessions.
+"""
+
+#-----------------------------------------------------------------------------
+# Needed modules
+
+# Standard library
+import re
+
+# Third party
+from pygments.lexer import Lexer, do_insertions
+from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
+ PythonTracebackLexer)
+from pygments.token import Comment, Generic
+
+from sphinx import highlighting
+
+
+#-----------------------------------------------------------------------------
+# Global constants
+line_re = re.compile('.*?\n')
+
+#-----------------------------------------------------------------------------
+# Code begins - classes and functions
+
+class IPythonConsoleLexer(Lexer):
+ """
+ For IPython console output or doctests, such as:
+
+ .. sourcecode:: ipython
+
+ In [1]: a = 'foo'
+
+ In [2]: a
+ Out[2]: 'foo'
+
+ In [3]: print a
+ foo
+
+ In [4]: 1 / 0
+
+ Notes:
+
+ - Tracebacks are not currently supported.
+
+ - It assumes the default IPython prompts, not customized ones.
+ """
+
+ name = 'IPython console session'
+ aliases = ['ipython']
+ mimetypes = ['text/x-ipython-console']
+ input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
+ output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
+ continue_prompt = re.compile(" \.\.\.+:")
+ tb_start = re.compile("\-+")
+
+ def get_tokens_unprocessed(self, text):
+ pylexer = PythonLexer(**self.options)
+ tblexer = PythonTracebackLexer(**self.options)
+
+ curcode = ''
+ insertions = []
+ for match in line_re.finditer(text):
+ line = match.group()
+ input_prompt = self.input_prompt.match(line)
+ continue_prompt = self.continue_prompt.match(line.rstrip())
+ output_prompt = self.output_prompt.match(line)
+ if line.startswith("#"):
+ insertions.append((len(curcode),
+ [(0, Comment, line)]))
+ elif input_prompt is not None:
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, input_prompt.group())]))
+ curcode += line[input_prompt.end():]
+ elif continue_prompt is not None:
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, continue_prompt.group())]))
+ curcode += line[continue_prompt.end():]
+ elif output_prompt is not None:
+ insertions.append((len(curcode),
+ [(0, Generic.Output, output_prompt.group())]))
+ curcode += line[output_prompt.end():]
+ else:
+ if curcode:
+ for item in do_insertions(insertions,
+ pylexer.get_tokens_unprocessed(curcode)):
+ yield item
+ curcode = ''
+ insertions = []
+ yield match.start(), Generic.Output, line
+ if curcode:
+ for item in do_insertions(insertions,
+ pylexer.get_tokens_unprocessed(curcode)):
+ yield item
+
+#-----------------------------------------------------------------------------
+# Register the extension as a valid pygments lexer
+highlighting.lexers['ipython'] = IPythonConsoleLexer()
diff --git a/doc/sphinxext/math_dollar.py b/doc/sphinxext/math_dollar.py
new file mode 100644
index 0000000..2ad2d3c
--- /dev/null
+++ b/doc/sphinxext/math_dollar.py
@@ -0,0 +1,56 @@
+import re
+
+def process_dollars(app, docname, source):
+ r"""
+ Replace dollar signs with backticks.
+
+ More precisely, do a regular expression search. Replace a plain
+ dollar sign ($) by a backtick (`). Replace an escaped dollar sign
+ (\$) by a dollar sign ($). Don't change a dollar sign preceded or
+ followed by a backtick (`$ or $`), because of strings like
+ "``$HOME``". Don't make any changes on lines starting with
+ spaces, because those are indented and hence part of a block of
+ code or examples.
+
+ This also doesn't replace dollar signs enclosed in curly braces,
+ to avoid nested math environments, such as ::
+
+ $f(n) = 0 \text{ if $n$ is prime}$
+
+ Thus the above line would get changed to
+
+ `f(n) = 0 \text{ if $n$ is prime}`
+ """
+ s = "\n".join(source)
+ if s.find("$") == -1:
+ return
+ # This searches for "$blah$" inside a pair of curly braces --
+ # don't change these, since they're probably coming from a nested
+ # math environment. So for each match, we replace it with a temporary
+ # string, and later on we substitute the original back.
+ global _data
+ _data = {}
+ def repl(matchobj):
+ global _data
+ s = matchobj.group(0)
+ t = "___XXX_REPL_%d___" % len(_data)
+ _data[t] = s
+ return t
+ s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s)
+
+ # matches $...$
+ dollars = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$")
+ # regular expression for \$
+ slashdollar = re.compile(r"\\\$")
+
+ s = dollars.sub(r":math:`\1`", s)
+
+ s = slashdollar.sub(r"$", s)
+ # change the original {...} things in:
+ for r in _data:
+ s = s.replace(r, _data[r])
+ # now save results in "source"
+ source[:] = [s]
+
+def setup(app):
+ app.connect("source-read", process_dollars)
diff --git a/doc/sphinxext/numpydoc.py b/doc/sphinxext/numpydoc.py
new file mode 100644
index 0000000..ff6c44c
--- /dev/null
+++ b/doc/sphinxext/numpydoc.py
@@ -0,0 +1,116 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
+
+"""
+
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+ reference_offset=[0]):
+ if what == 'module':
+ # Strip top title
+ title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+ re.I|re.S)
+ lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
+ else:
+ doc = get_doc_object(obj, what, "\n".join(lines))
+ lines[:] = str(doc).split("\n")
+
+ if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+ obj.__name__:
+ if hasattr(obj, '__module__'):
+ v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__))
+ else:
+ v = dict(full_name=obj.__name__)
+ lines += ['', '.. htmlonly::', '']
+ lines += [' %s' % x for x in
+ (app.config.numpydoc_edit_link % v).split("\n")]
+
+ # replace reference numbers so that there are no duplicates
+ references = []
+ for l in lines:
+ l = l.strip()
+ if l.startswith('.. ['):
+ try:
+ references.append(int(l[len('.. ['):l.index(']')]))
+ except ValueError:
+ print "WARNING: invalid reference in %s docstring" % name
+
+ # Start renaming from the biggest number, otherwise we may
+ # overwrite references.
+ references.sort()
+ if references:
+ for i, line in enumerate(lines):
+ for r in references:
+ new_r = reference_offset[0] + r
+ lines[i] = lines[i].replace('[%d]_' % r,
+ '[%d]_' % new_r)
+ lines[i] = lines[i].replace('.. [%d]' % r,
+ '.. [%d]' % new_r)
+
+ reference_offset[0] += len(references)
+
+def mangle_signature(app, what, name, obj, options, sig, retann):
+ # Do not try to inspect classes that don't define `__init__`
+ if (inspect.isclass(obj) and
+ 'initializes x; see ' in pydoc.getdoc(obj.__init__)):
+ return '', ''
+
+ if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
+ if not hasattr(obj, '__doc__'): return
+
+ doc = SphinxDocString(pydoc.getdoc(obj))
+ if doc['Signature']:
+ sig = re.sub("^[^(]*", "", doc['Signature'])
+ return sig, ''
+
+def initialize(app):
+ try:
+ app.connect('autodoc-process-signature', mangle_signature)
+ except:
+ monkeypatch_sphinx_ext_autodoc()
+
+def setup(app, get_doc_object_=get_doc_object):
+ global get_doc_object
+ get_doc_object = get_doc_object_
+
+ app.connect('autodoc-process-docstring', mangle_docstrings)
+ app.connect('builder-inited', initialize)
+ app.add_config_value('numpydoc_edit_link', None, True)
+
+#------------------------------------------------------------------------------
+# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
+#------------------------------------------------------------------------------
+
+def monkeypatch_sphinx_ext_autodoc():
+ global _original_format_signature
+ import sphinx.ext.autodoc
+
+ if sphinx.ext.autodoc.format_signature is our_format_signature:
+ return
+
+ print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
+ _original_format_signature = sphinx.ext.autodoc.format_signature
+ sphinx.ext.autodoc.format_signature = our_format_signature
+
+def our_format_signature(what, obj):
+ r = mangle_signature(None, what, None, obj, None, None, None)
+ if r is not None:
+ return r[0]
+ else:
+ return _original_format_signature(what, obj)
diff --git a/doc/sphinxext/only_directives.py b/doc/sphinxext/only_directives.py
new file mode 100644
index 0000000..ffb4d84
--- /dev/null
+++ b/doc/sphinxext/only_directives.py
@@ -0,0 +1,63 @@
+#
+# A pair of directives for inserting content that will only appear in
+# either html or latex.
+#
+
+from docutils.nodes import Body, Element
+from docutils.parsers.rst import directives
+
+class only_base(Body, Element):
+ def dont_traverse(self, *args, **kwargs):
+ return []
+
+class html_only(only_base):
+ pass
+
+class latex_only(only_base):
+ pass
+
+def run(content, node_class, state, content_offset):
+ text = '\n'.join(content)
+ node = node_class(text)
+ state.nested_parse(content, content_offset, node)
+ return [node]
+
+def html_only_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return run(content, html_only, state, content_offset)
+
+def latex_only_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return run(content, latex_only, state, content_offset)
+
+def builder_inited(app):
+ if app.builder.name == 'html':
+ latex_only.traverse = only_base.dont_traverse
+ else:
+ html_only.traverse = only_base.dont_traverse
+
+def setup(app):
+ app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0))
+ app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0))
+ app.add_node(html_only)
+ app.add_node(latex_only)
+
+ # This will *really* never see the light of day. As it turns out,
+ # this results in "broken" image nodes since they never get
+ # processed, so best not to do this.
+ # app.connect('builder-inited', builder_inited)
+
+ # Add visit/depart methods to HTML-Translator:
+ def visit_perform(self, node):
+ pass
+ def depart_perform(self, node):
+ pass
+ def visit_ignore(self, node):
+ node.children = []
+ def depart_ignore(self, node):
+ node.children = []
+
+ app.add_node(html_only, html=(visit_perform, depart_perform))
+ app.add_node(html_only, latex=(visit_ignore, depart_ignore))
+ app.add_node(latex_only, latex=(visit_perform, depart_perform))
+ app.add_node(latex_only, html=(visit_ignore, depart_ignore))
diff --git a/doc/sphinxext/plot_directive.py b/doc/sphinxext/plot_directive.py
new file mode 100644
index 0000000..3f0963b
--- /dev/null
+++ b/doc/sphinxext/plot_directive.py
@@ -0,0 +1,489 @@
+"""A special directive for including a matplotlib plot.
+
+The source code for the plot may be included in one of two ways:
+
+ 1. A path to a source file as the argument to the directive::
+
+ .. plot:: path/to/plot.py
+
+ When a path to a source file is given, the content of the
+ directive may optionally contain a caption for the plot::
+
+ .. plot:: path/to/plot.py
+
+ This is the caption for the plot
+
+ Additionally, one may specify the name of a function to call (with
+ no arguments) immediately after importing the module::
+
+ .. plot:: path/to/plot.py plot_function1
+
+ 2. Included as inline content to the directive::
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+ import matplotlib.image as mpimg
+ import numpy as np
+ img = mpimg.imread('_static/stinkbug.png')
+ imgplot = plt.imshow(img)
+
+In HTML output, `plot` will include a .png file with a link to a high-res
+.png and .pdf. In LaTeX output, it will include a .pdf.
+
+To customize the size of the plot, this directive supports all of the
+options of the `image` directive, except for `target` (since plot will
+add its own target). These include `alt`, `height`, `width`, `scale`,
+`align` and `class`.
+
+Additionally, if the `:include-source:` option is provided, the
+literal source will be displayed inline in the text, (as well as a
+link to the source in HTML). If this source file is in a non-UTF8 or
+non-ASCII encoding, the encoding must be specified using the
+`:encoding:` option.
+
+The set of file formats to generate can be specified with the
+`plot_formats` configuration variable.
+"""
+
+import sys, os, shutil, imp, warnings, cStringIO, re
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+from docutils.parsers.rst import directives
+try:
+ # docutils 0.4
+ from docutils.parsers.rst.directives.images import align
+except ImportError:
+ # docutils 0.5
+ from docutils.parsers.rst.directives.images import Image
+ align = Image.align
+import sphinx
+
+sphinx_version = sphinx.__version__.split(".")
+# The split is necessary for sphinx beta versions where the string is
+# '6b1'
+sphinx_version = tuple([int(re.split('[a-z]', x)[0])
+ for x in sphinx_version[:2]])
+
+import matplotlib
+import matplotlib.cbook as cbook
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import matplotlib.image as image
+from matplotlib import _pylab_helpers
+from matplotlib.sphinxext import only_directives
+
+
+class PlotWarning(Warning):
+ """Warning category for all warnings generated by this directive.
+
+ By printing our warnings with this category, it becomes possible to turn
+ them into errors by using in your conf.py::
+
+ warnings.simplefilter('error', plot_directive.PlotWarning)
+
+ This way, you can ensure that your docs only build if all your examples
+ actually run successfully.
+ """
+ pass
+
+
+# os.path.relpath is new in Python 2.6
+if hasattr(os.path, 'relpath'):
+ relpath = os.path.relpath
+else:
+ # This code is snagged from Python 2.6
+
+ def relpath(target, base=os.curdir):
+ """
+ Return a relative path to the target from either the current dir or an optional base dir.
+ Base can be a directory specified either as absolute or relative to current dir.
+ """
+
+ if not os.path.exists(target):
+ raise OSError, 'Target does not exist: '+target
+
+ if not os.path.isdir(base):
+ raise OSError, 'Base is not a directory or does not exist: '+base
+
+ base_list = (os.path.abspath(base)).split(os.sep)
+ target_list = (os.path.abspath(target)).split(os.sep)
+
+ # On the windows platform the target may be on a completely
+ # different drive from the base.
+ if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
+ raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
+
+ # Starting from the filepath root, work out how much of the
+ # filepath is shared by base and target.
+ for i in range(min(len(base_list), len(target_list))):
+ if base_list[i] <> target_list[i]: break
+ else:
+ # If we broke out of the loop, i is pointing to the first
+ # differing path elements. If we didn't break out of the
+ # loop, i is pointing to identical path elements.
+ # Increment i so that in all cases it points to the first
+ # differing path elements.
+ i+=1
+
+ rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
+ if rel_list:
+ return os.path.join(*rel_list)
+ else:
+ return ""
+
+template = """
+.. htmlonly::
+
+ %(links)s
+
+ .. figure:: %(prefix)s%(tmpdir)s/%(outname)s.png
+%(options)s
+
+%(caption)s
+
+.. latexonly::
+ .. figure:: %(prefix)s%(tmpdir)s/%(outname)s.pdf
+%(options)s
+
+%(caption)s
+
+"""
+
+exception_template = """
+.. htmlonly::
+
+ [`source code <%(linkdir)s/%(basename)s.py>`__]
+
+Exception occurred rendering plot.
+
+"""
+
+template_content_indent = ' '
+
+def out_of_date(original, derived):
+ """
+ Returns True if derivative is out-of-date wrt original,
+ both of which are full file paths.
+ """
+ return (not os.path.exists(derived) or
+ (os.path.exists(original) and
+ os.stat(derived).st_mtime < os.stat(original).st_mtime))
+
+def run_code(plot_path, function_name, plot_code):
+ """
+ Import a Python module from a path, and run the function given by
+ name, if function_name is not None.
+ """
+ # Change the working directory to the directory of the example, so
+ # it can get at its data files, if any. Add its path to sys.path
+ # so it can import any helper modules sitting beside it.
+ if plot_code is not None:
+ exec(plot_code)
+ else:
+ pwd = os.getcwd()
+ path, fname = os.path.split(plot_path)
+ sys.path.insert(0, os.path.abspath(path))
+ stdout = sys.stdout
+ sys.stdout = cStringIO.StringIO()
+ os.chdir(path)
+ fd = None
+ try:
+ fd = open(fname)
+ module = imp.load_module(
+ "__plot__", fd, fname, ('py', 'r', imp.PY_SOURCE))
+ finally:
+ del sys.path[0]
+ os.chdir(pwd)
+ sys.stdout = stdout
+ if fd is not None:
+ fd.close()
+
+ if function_name is not None:
+ getattr(module, function_name)()
+
+def run_savefig(plot_path, basename, tmpdir, destdir, formats):
+ """
+ Once a plot script has been imported, this function runs savefig
+ on all of the figures in all of the desired formats.
+ """
+ fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
+ for i, figman in enumerate(fig_managers):
+ for j, (format, dpi) in enumerate(formats):
+ if len(fig_managers) == 1:
+ outname = basename
+ else:
+ outname = "%s_%02d" % (basename, i)
+ outname = outname + "." + format
+ outpath = os.path.join(tmpdir, outname)
+ try:
+ figman.canvas.figure.savefig(outpath, dpi=dpi)
+ except:
+ s = cbook.exception_to_str("Exception saving plot %s" % plot_path)
+ warnings.warn(s, PlotWarning)
+ return 0
+ if j > 0:
+ shutil.copyfile(outpath, os.path.join(destdir, outname))
+
+ return len(fig_managers)
+
+def clear_state():
+ plt.close('all')
+ matplotlib.rcdefaults()
+ # Set a default figure size that doesn't overflow typical browser
+ # windows. The script is free to override it if necessary.
+ matplotlib.rcParams['figure.figsize'] = (5.5, 4.5)
+
+def render_figures(plot_path, function_name, plot_code, tmpdir, destdir,
+ formats):
+ """
+ Run a pyplot script and save the low and high res PNGs and a PDF
+ in outdir.
+ """
+ plot_path = str(plot_path) # todo, why is unicode breaking this
+ basedir, fname = os.path.split(plot_path)
+ basename, ext = os.path.splitext(fname)
+
+ all_exists = True
+
+ # Look for single-figure output files first
+ for format, dpi in formats:
+ outname = os.path.join(tmpdir, '%s.%s' % (basename, format))
+ if out_of_date(plot_path, outname):
+ all_exists = False
+ break
+
+ if all_exists:
+ return 1
+
+ # Then look for multi-figure output files, assuming
+ # if we have some we have all...
+ i = 0
+ while True:
+ all_exists = True
+ for format, dpi in formats:
+ outname = os.path.join(
+ tmpdir, '%s_%02d.%s' % (basename, i, format))
+ if out_of_date(plot_path, outname):
+ all_exists = False
+ break
+ if all_exists:
+ i += 1
+ else:
+ break
+
+ if i != 0:
+ return i
+
+ # We didn't find the files, so build them
+
+ clear_state()
+ try:
+ run_code(plot_path, function_name, plot_code)
+ except:
+ s = cbook.exception_to_str("Exception running plot %s" % plot_path)
+ warnings.warn(s, PlotWarning)
+ return 0
+
+ num_figs = run_savefig(plot_path, basename, tmpdir, destdir, formats)
+
+ if '__plot__' in sys.modules:
+ del sys.modules['__plot__']
+
+ return num_figs
+
+def _plot_directive(plot_path, basedir, function_name, plot_code, caption,
+ options, state_machine):
+ formats = setup.config.plot_formats
+ if type(formats) == str:
+ formats = eval(formats)
+
+ fname = os.path.basename(plot_path)
+ basename, ext = os.path.splitext(fname)
+
+ # Get the directory of the rst file, and determine the relative
+ # path from the resulting html file to the plot_directive links
+ # (linkdir). This relative path is used for html links *only*,
+ # and not the embedded image. That is given an absolute path to
+ # the temporary directory, and then sphinx moves the file to
+ # build/html/_images for us later.
+ rstdir, rstfile = os.path.split(state_machine.document.attributes['source'])
+ outdir = os.path.join('plot_directive', basedir)
+ reldir = relpath(setup.confdir, rstdir)
+ linkdir = os.path.join(reldir, outdir)
+
+ # tmpdir is where we build all the output files. This way the
+ # plots won't have to be redone when generating latex after html.
+
+ # Prior to Sphinx 0.6, absolute image paths were treated as
+ # relative to the root of the filesystem. 0.6 and after, they are
+ # treated as relative to the root of the documentation tree. We
+ # need to support both methods here.
+ tmpdir = os.path.join('build', outdir)
+ tmpdir = os.path.abspath(tmpdir)
+ if sphinx_version < (0, 6):
+ prefix = ''
+ else:
+ prefix = '/'
+ if not os.path.exists(tmpdir):
+ cbook.mkdirs(tmpdir)
+
+ # destdir is the directory within the output to store files
+ # that we'll be linking to -- not the embedded images.
+ destdir = os.path.abspath(os.path.join(setup.app.builder.outdir, outdir))
+ if not os.path.exists(destdir):
+ cbook.mkdirs(destdir)
+
+ # Properly indent the caption
+ caption = '\n'.join(template_content_indent + line.strip()
+ for line in caption.split('\n'))
+
+ # Generate the figures, and return the number of them
+ num_figs = render_figures(plot_path, function_name, plot_code, tmpdir,
+ destdir, formats)
+
+ # Now start generating the lines of output
+ lines = []
+
+ if plot_code is None:
+ shutil.copyfile(plot_path, os.path.join(destdir, fname))
+
+ if options.has_key('include-source'):
+ if plot_code is None:
+ lines.extend(
+ ['.. include:: %s' % os.path.join(setup.app.builder.srcdir, plot_path),
+ ' :literal:'])
+ if options.has_key('encoding'):
+ lines.append(' :encoding: %s' % options['encoding'])
+ del options['encoding']
+ else:
+ lines.extend(['::', ''])
+ lines.extend([' %s' % row.rstrip()
+ for row in plot_code.split('\n')])
+ lines.append('')
+ del options['include-source']
+ else:
+ lines = []
+
+ if num_figs > 0:
+ options = ['%s:%s: %s' % (template_content_indent, key, val)
+ for key, val in options.items()]
+ options = "\n".join(options)
+
+ for i in range(num_figs):
+ if num_figs == 1:
+ outname = basename
+ else:
+ outname = "%s_%02d" % (basename, i)
+
+ # Copy the linked-to files to the destination within the build tree,
+ # and add a link for them
+ links = []
+ if plot_code is None:
+ links.append('`source code <%(linkdir)s/%(basename)s.py>`__')
+ for format, dpi in formats[1:]:
+ links.append('`%s <%s/%s.%s>`__' % (format, linkdir, outname, format))
+ if len(links):
+ links = '[%s]' % (', '.join(links) % locals())
+ else:
+ links = ''
+
+ lines.extend((template % locals()).split('\n'))
+ else:
+ lines.extend((exception_template % locals()).split('\n'))
+
+ if len(lines):
+ state_machine.insert_input(
+ lines, state_machine.input_lines.source(0))
+
+ return []
+
+def plot_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """
+ Handle the arguments to the plot directive. The real work happens
+ in _plot_directive.
+ """
+ # The user may provide a filename *or* Python code content, but not both
+ if len(arguments):
+ plot_path = directives.uri(arguments[0])
+ basedir = relpath(os.path.dirname(plot_path), setup.app.builder.srcdir)
+
+ # If there is content, it will be passed as a caption.
+
+ # Indent to match expansion below. XXX - The number of spaces matches
+ # that of the 'options' expansion further down. This should be moved
+ # to common code to prevent them from diverging accidentally.
+ caption = '\n'.join(content)
+
+ # If the optional function name is provided, use it
+ if len(arguments) == 2:
+ function_name = arguments[1]
+ else:
+ function_name = None
+
+ return _plot_directive(plot_path, basedir, function_name, None, caption,
+ options, state_machine)
+ else:
+ plot_code = '\n'.join(content)
+
+ # Since we don't have a filename, use a hash based on the content
+ plot_path = md5(plot_code).hexdigest()[-10:]
+
+ return _plot_directive(plot_path, 'inline', None, plot_code, '', options,
+ state_machine)
+
+def mark_plot_labels(app, document):
+ """
+ To make plots referenceable, we need to move the reference from
+ the "htmlonly" (or "latexonly") node to the actual figure node
+ itself.
+ """
+ for name, explicit in document.nametypes.iteritems():
+ if not explicit:
+ continue
+ labelid = document.nameids[name]
+ if labelid is None:
+ continue
+ node = document.ids[labelid]
+ if node.tagname in ('html_only', 'latex_only'):
+ for n in node:
+ if n.tagname == 'figure':
+ sectname = name
+ for c in n:
+ if c.tagname == 'caption':
+ sectname = c.astext()
+ break
+
+ node['ids'].remove(labelid)
+ node['names'].remove(name)
+ n['ids'].append(labelid)
+ n['names'].append(name)
+ document.settings.env.labels[name] = \
+ document.settings.env.docname, labelid, sectname
+ break
+
+def setup(app):
+ setup.app = app
+ setup.config = app.config
+ setup.confdir = app.confdir
+
+ options = {'alt': directives.unchanged,
+ 'height': directives.length_or_unitless,
+ 'width': directives.length_or_percentage_or_unitless,
+ 'scale': directives.nonnegative_int,
+ 'align': align,
+ 'class': directives.class_option,
+ 'include-source': directives.flag,
+ 'encoding': directives.encoding }
+
+ app.add_directive('plot', plot_directive, True, (0, 2, 0), **options)
+ app.add_config_value(
+ 'plot_formats',
+ [('png', 80), ('hires.png', 200), ('pdf', 50)],
+ True)
+
+ app.connect('doctree-read', mark_plot_labels)
diff --git a/doc/users/index.rst b/doc/users/index.rst
new file mode 100644
index 0000000..d41de1f
--- /dev/null
+++ b/doc/users/index.rst
@@ -0,0 +1,18 @@
+.. _users-guide:
+
+============
+ User Guide
+============
+
+:Release: |version|
+:Date: |today|
+
+.. toctree::
+ :maxdepth: 2
+
+ quickstart
+ install
+ overview
+ tutorial
+
+
diff --git a/doc/users/install.rst b/doc/users/install.rst
new file mode 100644
index 0000000..34b665a
--- /dev/null
+++ b/doc/users/install.rst
@@ -0,0 +1,84 @@
+.. _install:
+
+======================
+ Download and install
+======================
+
+This page covers the necessary steps to install nitime. Below is a
+list of required dependencies, as well as some additional recommended
+software, which is needed only for particular functionality.
+
+Dependencies
+------------
+
+Must Have
+~~~~~~~~~
+
+Python_ 2.5 or later
+
+NumPy_ 1.3 or later
+
+SciPy_ 0.7 or later
+ Numpy and Scipy are high-level, optimized scientific computing libraries.
+
+Matplotlib_
+ Python plotting library. In particular, :mod:`Nitime` makes use of the
+ :mod:`matplotlib.mlab` module for the implementation of some numerical
+ algorithms.
+
+Recommended/optional
+~~~~~~~~~~~~~~~~~~~~
+
+Sphinx_
+ Required for building the documentation
+
+Networkx_
+ Used for some visualization functions; required in order to build the
+ documentation.
+
+Nibabel_
+ Used for reading in data from fMRI data files; required in order to build the
+ documentation.
+
+Getting the latest release
+--------------------------
+
+If you have easy_install_ available on your system, nitime can be downloaded
+and installed by issuing::
+
+ easy_install nitime
+
+.. _easy_install: easy-install_
+
+Otherwise, you can grab the latest release of the source code at this page_.
+
+.. _page: gh-download_
+
+Or, at the cheeseshop_.
+
+.. _cheeseshop: nitime-pypi_
+
+If you want to download the source code as it is being developed (pre-release),
+follow the instructions here: :ref:`following-latest`.
+
+Or, if you just want to look at the current development, without using our
+source version-control system, go here_.
+
+.. _here: gh-archive_
+
+
+Building from source
+--------------------
+
+The installation process is similar to that of other Python packages, so it
+will be familiar if you have some Python experience.
+
+Unpack the tarball and change into the source directory. Once in the
+source directory, you can build and install nitime using::
+
+ python setup.py install
+
+Or::
+
+ sudo python setup.py install
+
+.. include:: ../links_names.txt
diff --git a/doc/users/overview.rst b/doc/users/overview.rst
new file mode 100644
index 0000000..8105ce0
--- /dev/null
+++ b/doc/users/overview.rst
@@ -0,0 +1,258 @@
+.. _overview:
+
+===================
+Nitime: an overview
+===================
+
+Nitime can be used in order to represent, manipulate and analyze time-series
+data from experimental measurements. The main intention of the library is to
+serve as a platform for analyzing data collected in neuroscientific
+experiments, ranging from single-cell recordings to fMRI. However, the
+object-oriented interface may suit other kinds of time-series as well.
+
+In the :ref:`tutorial`, we provide examples of usage of the objects in the
+library and of some basic analysis.
+
+Here, we will provide a brief overview of the guiding principles underlying
+the structure and implementation of the library, and of the programming
+interface provided by the library.
+
+We will survey the library's objects, their attributes and central functions,
+and some possible example use-cases.
+
+=================
+Design Principles
+=================
+
+The main principle underlying the implementation of this library is a
+separation between the representation of time-series and their analysis.
+Thus, the implementation is divided into three main elements:
+
+- Base classes for representation of time and data: These include objects
+ representing time (including support for the representation and conversion
+ between time-units) and objects that serve as containers for the data:
+ the representation of the time-series to be analyzed. These base classes
+ are surveyed in more detail in :ref:`base_classes`.
+
+- Algorithms for analysis of time-series: A library containing implementations
+ of algorithms for various analysis methods is provided. Importantly, this
+ library is intentionally agnostic to the existence of the library
+ base-classes. Thus, users can choose to use these algorithms directly,
+ instead of relying on the base-classes provided by the library
+
+- Analyzer interfaces: These objects provide an interface between the algorithm
+ library and the time-series objects. Each one of these objects calls an
+ algorithm from the algorithms These objects rely on the details of the
+ implementation of the time-series objects. The input to these classes is
+ usually a time-series object and a set of parameters, which guide the
+ analysis. Some of the analyzer objects implement a thin interface (or
+ 'facade') to algorithms provided by scipy.signal.
+
+This principle is important, because it allows use of the analysis algorithms
+at two different levels. The algorithms are more general-purpose, but provide
+less support for the unique properties of time-series. The analyzer objects, on
+the other hand, provide a richer interface, but may be less flexible in their
+usage, because they assume use of the base-classes of the library.
+
+This structure also makes development of new algorithms and adoption of
+analysis code from other sources easier, because no specialized design
+properties are required in order to include an algorithm or set of algorithms
+in the algorithm library. However, once algorithms are adopted into the
+library, an analyzer object specific to that set of algorithms needs to be
+developed as well.
+
+Another important principle of the implementation is lazy initialization. Most
+attributes of both time-series and analysis objects are provided on a
+need-to-know basis. That is, initializing a time-series object, or an analyzer
+object, does not trigger any intensive computations. Instead, the computation
+of the attributes of analyzer objects is delayed until the moment the user
+accesses these attributes. In addition, once a computation is triggered, its
+result is stored as an attribute of the object, which assures that the
+computation is performed only the first time the analysis result is required.
+Thereafter, the stored result is reused.
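+
+To illustrate this pattern in isolation (a minimal sketch for exposition only,
+not the library's actual implementation, which relies on the
+:func:`desc.setattr_on_read` decorator mentioned below), a lazily computed and
+cached attribute might look like this:
+
+.. code-block:: python
+
+  import numpy as np
+
+  class LazyAnalyzer(object):
+      """A toy analyzer: nothing is computed at initialization."""
+      def __init__(self, data):
+          self.data = np.asarray(data)
+          self._cache = {}
+
+      @property
+      def mean(self):
+          # Computed on first access only; the result is then stored and reused.
+          if 'mean' not in self._cache:
+              self._cache['mean'] = self.data.mean()
+          return self._cache['mean']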
+
+.. _base_classes:
+
+==============
+ Base classes
+==============
+
+The library has several sets of classes, used for the representation of time
+and of time-series, in addition to classes used for analysis.
+
+The first kind of class is used to represent time and inherits from
+:class:`np.ndarray`, see :ref:`time_classes`. Another kind consists of data
+containers, used to represent different kinds of time-series data, see
+:ref:`time_series_classes`. A third important kind are *analyzer* objects,
+which can be used to apply a particular analysis to time-series objects, see
+:ref:`analyzer_objects`.
+
+.. _time_classes:
+
+Time
+====
+Experimental data is usually represented with regard to *relative* time. That
+is, the time relative to the beginning of the measurement. This is in contrast
+to many other kinds of data, which are represented with regard to *absolute*
+time (one example of this kind of time is calendar time, which includes a
+reference to some common point, such as 0 CE, or Jan. 1st 1970). An example of
+data which benefits from representation with absolute time is the
+representation of financial time-series, which can be compared against each
+other using the common reference, and for which the concept of the work-week
+applies.
+
+However, because the absolute calendar time of the occurrence of events in an
+experiment is most often of no importance, we can disregard it. Rather, it is
+more common to compare the time progression of data in different experiments
+conducted at different calendar times (different days, different times in the
+same day).
+
+The underlying representation of time in :mod:`nitime` is in arrays of dtype
+:class:`int64`. This allows the representation to be immune to rounding errors
+arising from representation of time with floating point numbers (see
+[Goldberg1991]_). However, it restricts the smallest time-interval that can be
+represented. In :mod:`nitime`, the smallest discrete time-points are of size
+:attr:`base_unit`, and this unit is *picoseconds*. Thus, all underlying
+representations of time are made in this unit. Since this unit is far smaller
+than needed for most practical uses, a raw representation in the base unit
+would in most cases be unwieldy. In order to make the time objects more
+manageable, time objects in :mod:`nitime` carry a :attr:`time_unit` and a
+:attr:`_conversion_factor`, which are used as a convenience to convert between
+the representation of time in the base unit and the appearance of time in the
+relevant time-unit.
+
+The first set of base classes is a set of representations of time itself. All
+these classes inherit from :class:`np.ndarray`. As mentioned above, the dtype of
+these classes is :class:`int64` and the underlying representation is always at
+the base unit. In addition to the methods inherited from :class:`np.ndarray`,
+these time representations have an :func:`at` method, which allows indexing
+with time. The result of this indexing is the most appropriate time-point in
+the respective :class:`TimeSeries` (see :ref:`time_series_access` for
+details). They
+have an :func:`index_at` method, which returns the integer index of this time
+in the underlying array. Finally, they will all have a :func:`during` method,
+which will allow indexing into these objects with an
+:ref:`interval_class`. This will return the appropriate times corresponding to
+an :ref:`interval_class` and :func:`index_during`, which will return the array
+of integers corresponding to the indices of these time-points in the array.
+
+For the time being, there are two types of Time classes: :ref:`TimeArray` and :ref:`UniformTime`.
+
+.. _TimeArray:
+
+:class:`TimeArray`
+-------------------
+
+This class has fewer restrictions on it: it is made of a 1-d array, which
+contains time-points that are not necessarily ordered. It can also contain
+several copies of the same time-point. This class can be used in order to
+represent sparsely occurring events, measured at some unspecified sampling rate
+and possibly collected from several different channels, where the data is
+sampled in order of channel and not in order of time. In addition to the array
+itself, this representation of time carries an attribute :attr:`time_unit`,
+which is the unit in which we would like to present the time-points (recall
+that the underlying representation is always in the base-unit).
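+
+For instance (a minimal sketch; the keyword name follows the :attr:`time_unit`
+attribute described above), an unordered collection of event times could be
+represented as:
+
+.. code-block:: python
+
+  import nitime.timeseries as ts
+
+  # event times need not be ordered or unique; time_unit sets the
+  # presentation unit
+  events = ts.TimeArray([0.5, 0.1, 0.1, 2.0], time_unit='ms')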
+
+.. _UniformTime:
+
+:class:`UniformTime`
+--------------------
+
+This class contains ordered, uniformly sampled time-points. This class has an
+explicit representation of :attr:`t_0`, :attr:`sampling_rate` and
+:attr:`sampling_interval`. Thus, each element in this array can be used in
+order to represent the entire time interval $t$, such that:
+$t_i \leq t < t_i + \delta t$, where $t_i$ is the nominal value held by that
+element of the array, and $\delta t$ is the value of
+:attr:`sampling_interval`.
+
+This object contains additional attributes that are not shared by the other
+time objects. In particular, an object of :class:`UniformTime`, UT, will have
+the following:
+
+* :attr:`UT.t_0`: the first time-point in the series.
+* :attr:`UT.sampling_rate`: the sampling rate of the series (this is an
+ instance of the :class:`Frequency` class, described below).
+* :attr:`UT.sampling_interval`: the value of $\delta t$, mentioned above.
+* :attr:`UT.duration`: the total time of the series.
+
+Obviously, :attr:`UT.sampling_rate` and :attr:`UT.sampling_interval` are redundant, but can both be useful.
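+
+For example (a minimal sketch, assuming the constructor accepts t0, length and
+sampling_rate keywords), a short uniformly sampled time axis and its derived
+attributes:
+
+.. code-block:: python
+
+  import nitime.timeseries as ts
+
+  UT = ts.UniformTime(t0=0, length=5, sampling_rate=2)
+
+  UT.sampling_interval   # the inverse of the sampling rate
+  UT.duration            # the total time spanned by the series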
+
+
+:class:`Frequency`
+------------------
+
+The :attr:`UT.sampling_rate` of :class:`UniformTime` is an object of the :class:`Frequency` class. This is a representation of the frequency in Hz. It is derived from a combination of the :attr:`sampling_interval` and the :attr:`time_unit`.
+
+.. _time_series_classes:
+
+Time-series
+============
+
+These are data container classes for representing different kinds of
+time-series data types.
+
+In implementing these objects, we adhere to the following principles:
+
+* The time-series data representations do not inherit from
+ :class:`np.ndarray`. Instead, one of their attributes is a :attr:`data`
+ attribute, which *is* a :class:`np.ndarray`. This principle should allow for
+ a clean and compact implementation, which doesn't carry all manner of
+ unwanted properties into a bloated object with obscure and unknown behaviors.
+ We have previously decided to make *time* the last dimension in this
+ object, but recently we have been considering making this a user choice, in
+ order to enable indexing into the data by time in a straightforward manner
+ (using expressions such as ``TI.data[i]``).
+* In tandem, one of their attributes is one of the :ref:`time_classes` base
+ classes described above. This is the :attr:`time` attribute of the
+ time-series object. For :class:`TimeSeries` it is implemented in
+ the object with a :func:`desc.setattr_on_read` decoration, so that it is only
+ generated if it is needed.
+
+.. _TimeSeries:
+
+:class:`TimeSeries`
+--------------------------
+
+This represents time-series of data collected continuously and regularly. It
+can be used in order to represent typical physiological data measurements, such
+as measurements of BOLD responses, or of membrane potential. The representation
+of time here is :ref:`UniformTime`.
+
+In addition to the :attr:`data` and :attr:`time` attributes, this class
+exposes attributes such as :attr:`sampling_rate`, :attr:`sampling_interval`,
+:attr:`duration`, :attr:`t0`, :attr:`time_unit` and :attr:`metadata` (see the
+:ref:`tutorial` for examples of their use).
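+
+For example (a brief sketch; see the :ref:`tutorial` for a complete session),
+the :attr:`data` and :attr:`time` attributes are accessed directly:
+
+.. code-block:: python
+
+  import nitime.timeseries as ts
+
+  t1 = ts.TimeSeries([[1, 2, 3], [3, 6, 8]], sampling_rate=0.5)
+
+  t1.data   # the underlying np.ndarray
+  t1.time   # a UniformTime object, e.g. UniformTime([0., 2., 4.], time_unit='s')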
+
+.. _Epochs:
+
+:class:`Epochs`
+---------------
+
+This class represents intervals of time, or epochs. Each instance of this class
+contains several attributes:
+
+- :attr:`E.start`: This is an object of class :class:`TimeArray`, which
+ represents a collection of starting times of epochs.
+- :attr:`E.stop`: This is an object of class :class:`TimeArray` which
+ represents a collection of end points of the epochs.
+- :attr:`E.duration`: This is an object of class :class:`TimeArray` which
+ represents the durations of the epochs.
+- :attr:`E.offset`: This attribute represents the offset of each epoch.
+- :attr:`E.time_unit`: This is the time-unit in which the epoch times are
+ represented.
+
+.. _Events:
+
+:class:`Events`
+---------------
+
+This is an object which represents a collection of events. For example, this
+can represent discrete button presses occurring during an experiment. This
+object contains a :ref:`TimeArray` as its representation of time. This means
+that the events recorded in the :attr:`data` array can be organized
+according to any organizing principle you would want, not necessarily according
+to their organization or order in time. For example, if events are read from
+different devices, the order of the events in the data array can be arbitrarily
+chosen to be the order of the devices from which data is read.
+
+
+
+Analyzers
+=========
+
+These objects implement a particular analysis, or family of analyses.
+Typically, such an object is initialized with a time-series object provided as
+input, as well as a set of parameter values which guide the analysis. However,
+for most analyzer objects, the inputs can also be provided upon calling the
+object, or by assignment to the already generated object.
+
+Sometimes, a user may wish to revert the computation, change some of the
+analysis parameters and recompute one or more of the results of the
+analysis. In order to do that, the analyzer objects implement a :attr:`reset`
+attribute, which reverts the computation of analysis attributes and allows the
+user to change parameters in the analyzer and recompute the analysis results.
+This structure keeps the cost of computing quantities derived from the analysis
+rather low.
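+
+For example (a minimal sketch, using the correlation analyzer shown in the
+:ref:`tutorial`), initialization is cheap and results are computed only when
+their attributes are accessed:
+
+.. code-block:: python
+
+  import nitime.timeseries as ts
+  import nitime.analysis as nta
+
+  t1 = ts.TimeSeries([[1, 2, 3], [3, 6, 8]], sampling_rate=0.5)
+
+  c = nta.CorrelationAnalyzer(t1)   # no intensive computation happens here
+  c.corrcoef                        # computed on first access, then stored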
+
+.. [Goldberg1991] Goldberg D (1991). What every computer scientist should know
+ about floating-point arithmetic. ACM computing surveys 23: 5-48
diff --git a/doc/users/quickstart.rst b/doc/users/quickstart.rst
new file mode 100644
index 0000000..85581d7
--- /dev/null
+++ b/doc/users/quickstart.rst
@@ -0,0 +1,19 @@
+============
+ Quickstart
+============
+
+If you have never used Python before and are interested in trying out nitime,
+you can quickly get up and running by following these 3 steps:
+
+- Install the EPD: The Enthought Python Distribution contains all the
+ dependencies for using nitime and can be downloaded from here (free for
+ academics):
+ http://www.enthought.com/products/getepd.php
+
+- Use *easy install* to get nitime: Open a terminal window and enter::
+
+
+ easy_install --user nitime
+
+
+- Look at the :ref:`examples`.
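+
+Once installed, a quick sanity check (a minimal sketch, assuming the
+installation placed nitime on your Python path) is to print the installed
+version from a terminal::
+
+  python -c "import nitime; print(nitime.__version__)"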
diff --git a/doc/users/tutorial.rst b/doc/users/tutorial.rst
new file mode 100644
index 0000000..e0336fd
--- /dev/null
+++ b/doc/users/tutorial.rst
@@ -0,0 +1,101 @@
+.. _tutorial:
+
+=========
+ Tutorial
+=========
+
+In this tutorial, we will demonstrate the basic use of nitime in initializing,
+manipulating and analyzing a simple time-series object. For more advanced usage
+see the examples section (:ref:`examples`).
+
+In order to get started, import :mod:`nitime.timeseries`:
+
+.. code-block:: python
+
+ In [1]: import nitime.timeseries as ts
+
+Then, you can initialize a simple time-series object, by providing data and
+some information about the sampling-rate or sampling-interval:
+
+.. code-block:: python
+
+ In [2]: t1 = ts.TimeSeries([[1,2,3],[3,6,8]],sampling_rate=0.5)
+
+If you tab-complete, you will see that the object now has several different
+attributes:
+
+.. code-block:: python
+
+ In [3]: t1.
+ t1.at t1.metadata t1.time
+ t1.data t1.sampling_interval t1.time_unit
+ t1.duration t1.sampling_rate
+ t1.from_time_and_data t1.t0
+
+Note that the sampling_interval is the inverse of the sampling_rate:
+
+.. code-block:: python
+
+ In [4]: t1.sampling_interval
+ Out[4]: 2.0 s
+
+In addition, the sampling rate is represented with units of Hz:
+
+.. code-block:: python
+
+ In [5]: t1.sampling_rate
+ Out[5]: 0.5 Hz
+
+Also, once this object is available to you, you have access to the underlying
+representation of time:
+
+.. code-block:: python
+
+ In [6]: t1.time
+ Out[6]: UniformTime([ 0., 2., 4.], time_unit='s')
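+
+Equivalently (a minimal sketch, assuming the constructor also accepts a
+sampling_interval keyword, as the attribute listing above suggests), the same
+series could have been initialized by specifying the sampling interval
+directly:
+
+.. code-block:: python
+
+  t2 = ts.TimeSeries([[1, 2, 3], [3, 6, 8]], sampling_interval=2)
+
+  t2.sampling_rate   # 0.5 Hz, as before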
+
+Now import the analysis library:
+
+.. code-block:: python
+
+ In [7]: import nitime.analysis as nta
+
+and initialize an analyzer for correlation analysis:
+
+.. code-block:: python
+
+ In [8]: c = nta.CorrelationAnalyzer(t1)
+
+The simplest use of this analyzer (and also the default output) is to compute
+the correlation coefficient matrix of the data in the different rows of the
+time-series:
+
+.. code-block:: python
+
+ In [9]: c.corrcoef
+ Out[9]:
+ array([[ 1. , 0.99339927],
+ [ 0.99339927, 1. ]])
+
+but it can also be used in order to generate the cross-correlation function
+between the channels, which is also a time-series object:
+
+.. code-block:: python
+
+ In [63]: x = c.xcorr
+
+ In [64]: x.time
+ Out[64]: UniformTime([-6., -4., -2., 0., 2.], time_unit='s')
+
+ In [65]: x.data
+ Out[65]:
+ array([[[ 3., 8., 14., 8., 3.],
+ [ 8., 22., 39., 24., 9.]],
+
+ [[ 8., 22., 39., 24., 9.],
+ [ 24., 66., 109., 66., 24.]]])
+
+
+
+
+
diff --git a/doc/whatsnew/development.rst b/doc/whatsnew/development.rst
new file mode 100644
index 0000000..ade8ed0
--- /dev/null
+++ b/doc/whatsnew/development.rst
@@ -0,0 +1,9 @@
+
+============================
+ Release notes: development
+============================
+
+These are the development release notes. As features are added to nitime and
+fixes are made, this is the place to document the broad overview of these
+features and fixes, so that they can be included in the release notes for the
+next version.
diff --git a/doc/whatsnew/index.rst b/doc/whatsnew/index.rst
new file mode 100644
index 0000000..781e95e
--- /dev/null
+++ b/doc/whatsnew/index.rst
@@ -0,0 +1,28 @@
+.. Developers should add in this file, during each release cycle, information
+.. about important changes they've made, in a summary format that's meant for
+.. end users. For each release we normally have three sections: features, bug
+.. fixes and api breakage.
+.. Please remember to credit the authors of the contributions by name,
+.. especially when they are new users or developers who do not regularly
+.. participate in Nitime's development.
+
+.. _whatsnew_index:
+
+=====================
+What's new in Nitime
+=====================
+
+This section documents the changes that have been made in various versions of
+Nitime:
+
+.. toctree::
+ :maxdepth: 1
+
+ version0.5
+ version0.4
+ version0.3
+
+
+Users should consult these pages to learn about new features, bug
+fixes and backwards incompatibilities. Developers should summarize the
+development work they do here.
diff --git a/doc/whatsnew/version0.3.rst b/doc/whatsnew/version0.3.rst
new file mode 100644
index 0000000..7d69243
--- /dev/null
+++ b/doc/whatsnew/version0.3.rst
@@ -0,0 +1,123 @@
+
+======================================
+ Release notes for nitime version 0.3
+======================================
+
+Summary of changes
+------------------
+
+Version 0.3 of nitime includes several additions and improvements:
+
+#. Testing: Test coverage of nitime has improved substantially. At this point,
+ 84% of the code is executed when the test suite is run. This includes
+ full 100% execution of all lines of code in most of the algorithms
+ sub-module. Work led by Ariel Rokem.
+
+#. Style and layout improvements: The layout of the algorithms and analysis
+ sub-modules have been simplified and a large majority of the code-base has
+ been modified to conform with PEP8 standards. Work led by Ariel Rokem, with
+ additional thanks to Alex Gramfort for pushing for these changes and helping
+ to bring them about.
+
+#. Bug-fixes to the SNRAnalyzer: Several bugs in this module have been
+ fixed. Work led by Ariel Rokem (who put these bugs in there in the first
+ place...).
+
+#. MAR estimation algorithms: Extensive reworking of MAR estimation algorithms.
+ Work led by Mike Trumpis.
+
+#. SeedCorrelationAnalyzer: This analyzer allows flexible correlation analysis
+ in a few-to-many channel mode. Work led by Michael Waskom.
+
+#. GrangerAnalyzer: Following Mike Trumpis' work on MAR estimation, we have
+ implemented an Analyzer for Granger 'causality' analysis. Work led by Ariel
+ Rokem, Mike Trumpis and Fernando Perez.
+
+#. Filtering: Implementation of zero phase-delay filtering, adding IIR and
+ FIR filter methods to the FilterAnalyzer. Work led by Ariel Rokem.
+
+#. Several new examples, including examples of the usage of these new analysis
+ methods.
+
+#. Epoch slicing: Additional work on TimeSeries objects, towards an
+ implementation of slicing by epochs. This feature is still experimental at this
+ point. Work led by Ariel Rokem, Fernando Perez, Killian Koepsell and Paul
+ Ivanov.
+
+
+
+Contributors to this release
+----------------------------
+
+* Alexandre Gramfort
+* Ariel Rokem
+* Christopher Burns
+* Fernando Perez
+* Jarrod Millman
+* Killian Koepsell
+* Michael Waskom
+* Mike Trumpis
+* Paul Ivanov
+* Yaroslav Halchenko
+
+.. Note::
+
+ This list was generated using::
+
+ git log dev/0.3 HEAD --format='* %aN <%aE>' |sed 's/@/\-at\-/' | sed 's/<>//' | sort -u
+
+ Please let us know if you should appear on this list and do not, so that we
+ can add your name in future release notes.
+
+
+Detailed stats from the github repository
+-----------------------------------------
+
+Github stats for the last 270 days.
+We closed a total of 38 issues, 28 pull requests and 10 regular
+issues; this is the full list (generated with the script
+`tools/github_stats.py`):
+
+Pull requests (28):
+
+* `78 <https://github.com/nipy/nitime/issues/78>`_: Doctests
+* `76 <https://github.com/nipy/nitime/issues/76>`_: Sphinx warnings
+* `74 <https://github.com/nipy/nitime/issues/74>`_: BF: IIR filtering can do band-pass as well as low-pass and high-pass.
+* `72 <https://github.com/nipy/nitime/issues/72>`_: ENH: Throw an informative warning when time-series is short for the NFFT.
+* `75 <https://github.com/nipy/nitime/issues/75>`_: ENH: Default behavior for time_series_from_file.
+* `71 <https://github.com/nipy/nitime/issues/71>`_: Granger analyzer
+* `73 <https://github.com/nipy/nitime/issues/73>`_: Seed correlation analyzer
+* `69 <https://github.com/nipy/nitime/issues/69>`_: BF: add back tril_indices from numpy 1.4, to support operation with older
+* `70 <https://github.com/nipy/nitime/issues/70>`_: Ar latex
+* `67 <https://github.com/nipy/nitime/issues/67>`_: Mar examples
+* `66 <https://github.com/nipy/nitime/issues/66>`_: Test coverage
+* `63 <https://github.com/nipy/nitime/issues/63>`_: Utils work
+* `62 <https://github.com/nipy/nitime/issues/62>`_: Interpolate dpss windows when they are too large to be calculated directl
+* `64 <https://github.com/nipy/nitime/issues/64>`_: Pass fir window as a kwarg.
+* `39 <https://github.com/nipy/nitime/issues/39>`_: Fix xcorr plot
+* `54 <https://github.com/nipy/nitime/issues/54>`_: Reorganize analysis
+* `49 <https://github.com/nipy/nitime/issues/49>`_: added basic arithetics to timeseries objects
+* `52 <https://github.com/nipy/nitime/issues/52>`_: Reorganization
+* `48 <https://github.com/nipy/nitime/issues/48>`_: Fix filter analyzer
+* `47 <https://github.com/nipy/nitime/issues/47>`_: Correlation analyzer
+* `43 <https://github.com/nipy/nitime/issues/43>`_: Filtfilt
+* `42 <https://github.com/nipy/nitime/issues/42>`_: (Not) Biopac
+* `41 <https://github.com/nipy/nitime/issues/41>`_: Fix example bugs
+* `40 <https://github.com/nipy/nitime/issues/40>`_: Epochslicing2
+* `33 <https://github.com/nipy/nitime/issues/33>`_: Epochslicing
+* `38 <https://github.com/nipy/nitime/issues/38>`_: Fix snr df bug
+* `37 <https://github.com/nipy/nitime/issues/37>`_: Event slicing
+* `36 <https://github.com/nipy/nitime/issues/36>`_: Index at bug
+
+Regular issues (10):
+
+* `31 <https://github.com/nipy/nitime/issues/31>`_: tools/make_examples.py runs all the examples every time
+* `56 <https://github.com/nipy/nitime/issues/56>`_: Test failure on newer versions of scipy
+* `65 <https://github.com/nipy/nitime/issues/65>`_: Prune the nipy/nitime repo from old stragglers
+* `57 <https://github.com/nipy/nitime/issues/57>`_: multi_taper_psd with jackknife=True fails with multiple timeseries input
+* `59 <https://github.com/nipy/nitime/issues/59>`_: missing parameter docstring in utils.ar_generator
+* `58 <https://github.com/nipy/nitime/issues/58>`_: Scale of sigma in algorithms.multi_taper_psd
+* `61 <https://github.com/nipy/nitime/issues/61>`_: fail to estimate dpss_windows for long signals
+* `34 <https://github.com/nipy/nitime/issues/34>`_: SNR information rates need to be normalized by the frequency resolution
+* `45 <https://github.com/nipy/nitime/issues/45>`_: Bugs in CorrelationAnalyzer
+* `29 <https://github.com/nipy/nitime/issues/29>`_: Filtering
diff --git a/doc/whatsnew/version0.4.rst b/doc/whatsnew/version0.4.rst
new file mode 100644
index 0000000..00f9d30
--- /dev/null
+++ b/doc/whatsnew/version0.4.rst
@@ -0,0 +1,111 @@
+======================================
+ Release notes for nitime version 0.4
+======================================
+
+Summary of changes
+------------------
+
+Major changes introduced in version 0.4 of :mod:`nitime`:
+
+#. :mod:`LazyImports <nitime.lazy>`: Imports of modules
+ are delayed until they are actually used. Work led by Paul Ivanov.
+
+#. :class:`TimeArray <nitime.timeseries.TimeArray>` math: Mathematical
+ operations such as multiplication/division, as well as min/max/mean/sum
+ are now implemented for the TimeArray class. Work led by Paul Ivanov.
+
+#. Replace numpy FFT with scipy FFT. This should improve performance. Work
+ instigated and led by Alex Gramfort.
+
+#. Scipy > 0.10 compatibility: Changes in recent versions of scipy had caused
+ imports of some modules of nitime to break. This version fixes this
+ issue.
+
+Contributors to this release
+----------------------------
+
+The following people contributed to this release:
+
+* Alexandre Gramfort
+* Ariel Rokem
+* endolith
+* Paul Ivanov
+* Sergey Karayev
+* Yaroslav Halchenko
+
+
+.. Note::
+
+ This list was generated using::
+
+ git log --pretty=format:"* %aN" PREV_RELEASE... | sort | uniq
+
+ Please let us know if you should appear on this list and do not, so that we
+ can add your name in future release notes.
+
+
+Detailed stats from the github repository
+-----------------------------------------
+
+GitHub stats for the last 311 days.
+We closed a total of 51 issues, 17 pull requests and 34 regular
+issues; this is the full list (generated with the script
+`tools/github_stats.py`):
+
+Pull Requests (17):
+
+* :ghissue:`104`: nose_arg gives test finer granularity
+* :ghissue:`103`: make the LazyImport jig pickleable
+* :ghissue:`101`: fix some typos
+* :ghissue:`99`: First of all, thanks a lot for the sweet correlation matrix visualization method! I noticed that the behavior of the color_anchor parameter is not what I expected. Let me know what you think
+* :ghissue:`98`: RF: utils.multi_interesect no longer relies on deprecated intersect1d_nu
+* :ghissue:`96`: BF: Import factorial from the scipy.misc namespace.
+* :ghissue:`94`: BF: Account for situations in which TimeSeries has more than two dimensions
+* :ghissue:`92`: Time array math functions
+* :ghissue:`91`: Timearray math
+* :ghissue:`88`: Lazy imports
+* :ghissue:`89`: Masked arrays
+* :ghissue:`86`: ENH: Different versions of nose require different input to first-package-
+* :ghissue:`83`: BF: Improvements and fixes to nosetesting.
+* :ghissue:`81`: ENH : s/numpy.fft/scipy.fftpack
+* :ghissue:`77`: BF: Carry around a copy of some of the spectral analysis functions.
+* :ghissue:`79`: pep8 + pyflakes + misc readability
+* :ghissue:`78`: Doctests
+
+Issues (34):
+
+* :ghissue:`30`: Make default behavior for fmri.io.time_series_from_file
+* :ghissue:`84`: Note on examples
+* :ghissue:`93`: TimeArray .prod is borked (because of overflow?)
+* :ghissue:`104`: nose_arg gives test finer granularity
+* :ghissue:`103`: make the LazyImport jig pickleable
+* :ghissue:`102`: sphinx docs won't build (related to lazyimports?)
+* :ghissue:`87`: Test failures on 10.4
+* :ghissue:`100`: magnitude of fft showing negative values
+* :ghissue:`101`: fix some typos
+* :ghissue:`99`: First of all, thanks a lot for the sweet correlation matrix visualization method! I noticed that the behavior of the color_anchor parameter is not what I expected. Let me know what you think
+* :ghissue:`97`: utils.py uses feature removed from numpy1.6
+* :ghissue:`98`: RF: utils.multi_interesect no longer relies on deprecated intersect1d_nu
+* :ghissue:`95`: ImportError: Cannot Import name Factorial
+* :ghissue:`96`: BF: Import factorial from the scipy.misc namespace.
+* :ghissue:`94`: BF: Account for situations in which TimeSeries has more than two dimensions
+* :ghissue:`92`: Time array math functions
+* :ghissue:`91`: Timearray math
+* :ghissue:`88`: Lazy imports
+* :ghissue:`89`: Masked arrays
+* :ghissue:`80`: Replace numpy fft with scipy fft
+* :ghissue:`86`: ENH: Different versions of nose require different input to first-package-
+* :ghissue:`85`: slicing time using epochs that start before or end after
+* :ghissue:`83`: BF: Improvements and fixes to nosetesting.
+* :ghissue:`82`: nosetest w/o exit=False funks up in ipython
+* :ghissue:`81`: ENH : s/numpy.fft/scipy.fftpack
+* :ghissue:`32`: Add a "how to release" page in the docs
+* :ghissue:`35`: index_at seems to fail with negative times
+* :ghissue:`50`: Setting IIR filter lower bound to 0
+* :ghissue:`44`: Warning when using coherence with welch method and NFFT longer than the time-series itself
+* :ghissue:`68`: tril_indices not available in fairly recent numpy versions
+* :ghissue:`77`: BF: Carry around a copy of some of the spectral analysis functions.
+* :ghissue:`55`: Warning in analysis.coherence might be a bug
+* :ghissue:`79`: pep8 + pyflakes + misc readability
+* :ghissue:`78`: Doctests
+
diff --git a/doc/whatsnew/version0.5.rst b/doc/whatsnew/version0.5.rst
new file mode 100644
index 0000000..9822222
--- /dev/null
+++ b/doc/whatsnew/version0.5.rst
@@ -0,0 +1,91 @@
+======================================
+ Release notes for nitime version 0.5
+======================================
+
+Summary of changes
+------------------
+
+Major changes introduced in version 0.5 of :mod:`nitime`:
+
+#. Python 3 support. Work led by Paul Ivanov and Ariel Rokem, with help from Thomas Kluyver, and Matthew Brett.
+
+#. Multi-taper F-test for harmonic components. Work led by Mike Trumpis.
+
+#. Continuous integration testing with Travis. Work led by Ariel Rokem.
+
+#. Various fixes and robustifications from several contributors (see below).
+
+Contributors to this release
+----------------------------
+
+The following people contributed to this release:
+
+* Ariel Rokem
+* Dmitry Shachnev
+* Eric Larson
+* Mike Trumpis
+* Paul Ivanov
+* endolith
+
+.. Note::
+
+ This list was generated using::
+
+ git log --pretty=format:"* %aN" rel/0.4... | sort | uniq
+
+ Please let us know if you should appear on this list and do not, so that we
+ can add your name in future release notes.
+
+
+Detailed stats from the github repository
+-----------------------------------------
+
+GitHub stats for the last 730 days. We closed a total of 40 issues, 17 pull
+requests and 23 regular issues; this is the full list (generated with the
+script `tools/github_stats.py`):
+
+Pull Requests (17):
+
+* :ghissue:`124`: Buildbot mpl
+* :ghissue:`114`: This should help the buildbot on older platforms
+* :ghissue:`122`: Mpl units patch
+* :ghissue:`121`: RF: Remove the dependency on external 'six', by integrating that file in
+* :ghissue:`119`: Python3 support!
+* :ghissue:`120`: Pi=py3k
+* :ghissue:`115`: BF: For complex signals, return both the negative and positive spectrum
+* :ghissue:`112`: Mt fix ups
+* :ghissue:`118`: FIX: Pass int
+* :ghissue:`117`: NF: On the way to enabling travis ci.
+* :ghissue:`111`: Use inheritance_diagram.py provided by Sphinx (>= 0.6)
+* :ghissue:`110`: BF + TST: Robustification and testing of utility function.
+* :ghissue:`108`: Doc timeseries
+* :ghissue:`109`: Spectra for multi-dimensional time-series
+* :ghissue:`107`: DOC: fix parameter rendering for timeseries
+* :ghissue:`106`: Fix rst definition list formatting
+* :ghissue:`105`: FIX: Kmax wrong, BW = bandwidth
+
+Issues (23):
+
+* :ghissue:`116`: Refer to github more prominently on webpage
+* :ghissue:`124`: Buildbot mpl
+* :ghissue:`114`: This should help the buildbot on older platforms
+* :ghissue:`123`: Memory error of GrangerAnalyzer
+* :ghissue:`122`: Mpl units patch
+* :ghissue:`121`: RF: Remove the dependency on external 'six', by integrating that file in
+* :ghissue:`120`: Pi=py3k
+* :ghissue:`119`: Python3 support!
+* :ghissue:`115`: BF: For complex signals, return both the negative and positive spectrum
+* :ghissue:`112`: Mt fix ups
+* :ghissue:`118`: FIX: Pass int
+* :ghissue:`117`: NF: On the way to enabling travis ci.
+* :ghissue:`113`: Race condition provoked in TimeArray
+* :ghissue:`111`: Use inheritance_diagram.py provided by Sphinx (>= 0.6)
+* :ghissue:`110`: BF + TST: Robustification and testing of utility function.
+* :ghissue:`108`: Doc timeseries
+* :ghissue:`109`: Spectra for multi-dimensional time-series
+* :ghissue:`107`: DOC: fix parameter rendering for timeseries
+* :ghissue:`106`: Fix rst definition list formatting
+* :ghissue:`105`: FIX: Kmax wrong, BW = bandwidth
+* :ghissue:`30`: Make default behavior for fmri.io.time_series_from_file
+* :ghissue:`84`: Note on examples
+* :ghissue:`93`: TimeArray .prod is borked (because of overflow?)
diff --git a/nitime/LICENSE b/nitime/LICENSE
new file mode 100644
index 0000000..934d189
--- /dev/null
+++ b/nitime/LICENSE
@@ -0,0 +1,30 @@
+Copyright (c) 2006-2011, NIPY Developers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NIPY Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/nitime/__init__.py b/nitime/__init__.py
new file mode 100644
index 0000000..c021e80
--- /dev/null
+++ b/nitime/__init__.py
@@ -0,0 +1,33 @@
+"""
+Nitime: Time-series analysis for neuroscience
+
+The module has several sub-modules:
+
+- ``timeseries``: contains the constructors for time and time-series objects
+
+- ``algorithms``: Contains various algorithms. This sub-module depends only on
+ scipy, numpy and matplotlib.
+
+- ``utils``: Utility functions.
+
+- ``analysis``: Contains *Analyzer* objects, which implement particular
+ analysis methods on the time-series objects
+
+- ``viz``: Visualization
+
+All of the sub-modules will be imported as part of ``__init__``, so that users
+have all of these things at their fingertips.
+"""
+
+__docformat__ = 'restructuredtext'
+
+from .version import __version__
+
+from . import algorithms
+from . import timeseries
+from . import analysis
+from . import six
+
+from .timeseries import *
+
+from nitime.testlib import test
diff --git a/nitime/_mpl_units.py b/nitime/_mpl_units.py
new file mode 100644
index 0000000..a75dbdc
--- /dev/null
+++ b/nitime/_mpl_units.py
@@ -0,0 +1,226 @@
+"""
+
+This is a fixed copy of a module from Matplotlib v1.3 (https://github.com/matplotlib/matplotlib/pull/2591).
+
+It was taken verbatim from Matplotlib's github repository and is, as is all of
+MPL v1.3.1, copyright (c) 2012-2013 Matplotlib Development Team; All Rights
+Reserved.
+
+1. This LICENSE AGREEMENT is between the Matplotlib Development Team
+("MDT"), and the Individual or Organization ("Licensee") accessing and
+otherwise using matplotlib software in source or binary form and its
+associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, MDT
+hereby grants Licensee a nonexclusive, royalty-free, world-wide license
+to reproduce, analyze, test, perform and/or display publicly, prepare
+derivative works, distribute, and otherwise use matplotlib 1.3.1
+alone or in any derivative version, provided, however, that MDT's
+License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
+2012-2013 Matplotlib Development Team; All Rights Reserved" are retained in
+matplotlib 1.3.1 alone or in any derivative version prepared by
+Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or
+incorporates matplotlib 1.3.1 or any part thereof, and wants to
+make the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to matplotlib 1.3.1.
+
+4. MDT is making matplotlib 1.3.1 available to Licensee on an "AS
+IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 1.3.1
+WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
+1.3.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
+LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
+MATPLOTLIB 1.3.1, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
+THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between MDT and
+Licensee. This License Agreement does not grant permission to use MDT
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib 1.3.1,
+Licensee agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+The original module docstring follows:
+
+The classes here provide support for using custom classes with
+matplotlib, eg those that do not expose the array interface but know
+how to convert themselves to arrays. It also supports classes with
+units and units conversion. Use cases include converters for custom
+objects, eg a list of datetime objects, as well as for objects that
+are unit aware. We don't assume any particular units implementation,
+rather a units implementation must provide a ConversionInterface, and
+then register with the Registry converter dictionary. For example,
+here is a complete implementation which supports plotting with native
+datetime objects::
+
+
+ import matplotlib.units as units
+ import matplotlib.dates as dates
+ import matplotlib.ticker as ticker
+ import datetime
+
+ class DateConverter(units.ConversionInterface):
+
+ @staticmethod
+ def convert(value, unit, axis):
+ 'convert value to a scalar or array'
+ return dates.date2num(value)
+
+ @staticmethod
+ def axisinfo(unit, axis):
+ 'return major and minor tick locators and formatters'
+ if unit!='date': return None
+ majloc = dates.AutoDateLocator()
+ majfmt = dates.AutoDateFormatter(majloc)
+ return AxisInfo(majloc=majloc,
+ majfmt=majfmt,
+ label='date')
+
+ @staticmethod
+ def default_units(x, axis):
+ 'return the default unit for x or None'
+ return 'date'
+
+ # finally we register our object type with a converter
+ units.registry[datetime.date] = DateConverter()
+
+"""
+from __future__ import print_function
+from matplotlib.cbook import iterable, is_numlike
+import numpy as np
+
+
+class AxisInfo:
+ """information to support default axis labeling and tick labeling, and
+ default limits"""
+ def __init__(self, majloc=None, minloc=None,
+ majfmt=None, minfmt=None, label=None,
+ default_limits=None):
+ """
+ majloc and minloc: TickLocators for the major and minor ticks
+ majfmt and minfmt: TickFormatters for the major and minor ticks
+ label: the default axis label
+ default_limits: the default min, max of the axis if no data is present
+ If any of the above are None, the axis will simply use the default
+ """
+ self.majloc = majloc
+ self.minloc = minloc
+ self.majfmt = majfmt
+ self.minfmt = minfmt
+ self.label = label
+ self.default_limits = default_limits
+
+
+class ConversionInterface:
+ """
+ The minimal interface for a converter to take custom instances (or
+ sequences) and convert them to values mpl can use
+ """
+ @staticmethod
+ def axisinfo(unit, axis):
+ 'return an units.AxisInfo instance for axis with the specified units'
+ return None
+
+ @staticmethod
+ def default_units(x, axis):
+ 'return the default unit for x or None for the given axis'
+ return None
+
+ @staticmethod
+ def convert(obj, unit, axis):
+ """
+ convert obj using unit for the specified axis. If obj is a sequence,
+ return the converted sequence. The output must be a sequence of scalars
+ that can be used by the numpy array layer
+ """
+ return obj
+
+ @staticmethod
+ def is_numlike(x):
+ """
+ The matplotlib datalim, autoscaling, locators etc work with
+ scalars which are the units converted to floats given the
+ current unit. The converter may be passed these floats, or
+ arrays of them, even when units are set. Derived conversion
+ interfaces may opt to pass plain-ol unitless numbers through
+ the conversion interface and this is a helper function for
+ them.
+ """
+ if iterable(x):
+ for thisx in x:
+ return is_numlike(thisx)
+ else:
+ return is_numlike(x)
+
+
+class Registry(dict):
+ """
+ register types with conversion interface
+ """
+ def __init__(self):
+ dict.__init__(self)
+ self._cached = {}
+
+ def get_converter(self, x):
+ 'get the converter interface instance for x, or None'
+
+ if not len(self):
+ return None # nothing registered
+ #DISABLED idx = id(x)
+ #DISABLED cached = self._cached.get(idx)
+ #DISABLED if cached is not None: return cached
+
+ converter = None
+ classx = getattr(x, '__class__', None)
+
+ if classx is not None:
+ converter = self.get(classx)
+
+ if isinstance(x, np.ndarray) and x.size:
+ xravel = x.ravel()
+ try:
+ # pass the first value of x that is not masked back to
+ # get_converter
+ if not np.all(xravel.mask):
+ # some elements are not masked
+ converter = self.get_converter(
+ xravel[np.argmin(xravel.mask)])
+ return converter
+ except AttributeError:
+ # not a masked_array
+ # Make sure we don't recurse forever -- it's possible for
+ # ndarray subclasses to continue to return subclasses and
+ # not ever return a non-subclass for a single element.
+ next_item = xravel[0]
+ if (not isinstance(next_item, np.ndarray) or
+ next_item.shape != x.shape):
+ converter = self.get_converter(next_item)
+ return converter
+
+ if converter is None and iterable(x):
+ for thisx in x:
+ # Make sure that recursing might actually lead to a solution,
+ # if we are just going to re-examine another item of the same
+ # kind, then do not look at it.
+ if classx and classx != getattr(thisx, '__class__', None):
+ converter = self.get_converter(thisx)
+ return converter
+
+ #DISABLED self._cached[idx] = converter
+ return converter
+
+
+registry = Registry()
diff --git a/nitime/_utils.pyx b/nitime/_utils.pyx
new file mode 100644
index 0000000..f679ef5
--- /dev/null
+++ b/nitime/_utils.pyx
@@ -0,0 +1,64 @@
+""" -*- python -*- file
+C-level implementation of the following routines in utils.py:
+
+ * tridisolve()
+
+"""
+
+import numpy as np
+cimport numpy as cnp
+cimport cython
+
+@cython.boundscheck(False)
+def tridisolve(cnp.ndarray[cnp.npy_double, ndim=1] d,
+ cnp.ndarray[cnp.npy_double, ndim=1] e,
+ cnp.ndarray[cnp.npy_double, ndim=1] b, overwrite_b=True):
+ """
+ Symmetric tridiagonal system solver, from Golub and Van Loan pg 157
+
+ Parameters
+ ----------
+
+ d : ndarray
+ main diagonal stored in d[:]
+ e : ndarray
+ superdiagonal stored in e[:-1]
+ b : ndarray
+ RHS vector
+
+ Returns
+ -------
+
+ x : ndarray
+ Solution to Ax = b (if overwrite_b is False). Otherwise solution is
+ stored in previous RHS vector b
+
+ """
+ # indexing
+ cdef int N = len(b)
+ cdef int k
+
+ # work vectors
+ cdef cnp.ndarray[cnp.npy_double, ndim=1] dw
+ cdef cnp.ndarray[cnp.npy_double, ndim=1] ew
+ cdef cnp.ndarray[cnp.npy_double, ndim=1] x
+ dw = d.copy()
+ ew = e.copy()
+ if overwrite_b:
+ x = b
+ else:
+ x = b.copy()
+ for k in xrange(1, N):
+ # e^(k-1) = e(k-1) / d(k-1)
+ # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)
+ t = ew[k - 1]
+ ew[k - 1] = t / dw[k - 1]
+ dw[k] = dw[k] - t * ew[k - 1]
+ for k in xrange(1, N):
+ x[k] = x[k] - ew[k - 1] * x[k - 1]
+ x[N - 1] = x[N - 1] / dw[N - 1]
+ for k in xrange(N - 2, -1, -1):
+ x[k] = x[k] / dw[k] - ew[k] * x[k + 1]
+
+ if not overwrite_b:
+ return x
diff --git a/nitime/algorithms/__init__.py b/nitime/algorithms/__init__.py
new file mode 100644
index 0000000..57a9b6d
--- /dev/null
+++ b/nitime/algorithms/__init__.py
@@ -0,0 +1,61 @@
+"""
+This module contains implementations of algorithms for time series
+analysis. These algorithms include:
+
+1. Spectral estimation: calculate the spectra of time-series and cross-spectra
+between time-series.
+
+:func:`get_spectra`, :func:`get_spectra_bi`, :func:`periodogram`,
+:func:`periodogram_csd`, :func:`dpss_windows`, :func:`multi_taper_psd`,
+:func:`multi_taper_csd`, :func:`mtm_cross_spectrum`
+
+2. Coherency: calculate the pairwise correlation between time-series in the
+frequency domain and related quantities.
+
+:func:`coherency`, :func:`coherence`, :func:`coherence_regularized`,
+:func:`coherency_regularized`, :func:`coherency_bavg`, :func:`coherence_bavg`,
+:func:`coherence_partial`, :func:`coherence_partial_bavg`,
+:func:`coherency_phase_spectrum`, :func:`coherency_phase_delay`,
+:func:`coherency_phase_delay_bavg`, :func:`correlation_spectrum`
+
+3. Cached coherency: A set of special functions for quickly calculating
+coherency in large data-sets, where the calculation is done over only a subset
+of the adjacency matrix edges and intermediate calculations are cached, in
+order to save calculation time.
+
+:func:`cache_fft`, :func:`cache_to_psd`, :func:`cache_to_phase`,
+:func:`cache_to_relative_phase`, :func:`cache_to_coherency`.
+
+4. Event-related analysis: calculate the correlation between time-series and
+external events.
+
+:func:`freq_domain_xcorr`, :func:`freq_domain_xcorr_zscored`, :func:`fir`
+
+5. Wavelet transforms: Calculate wavelet transforms of time-series data.
+
+:func:`wmorlet`, :func:`wfmorlet_fft`, :func:`wlogmorlet`,
+:func:`wlogmorlet_fft`
+
+6. Filtering: Filter a signal in the frequency domain.
+
+:func:`boxcar_filter`
+
+7. Autoregressive estimation and Granger causality
+
+:func:`AR_est_YW`, :func:`AR_est_LD`, :func:`MAR_est_LWR`, :func:`AR_psd`,
+:func:`lwr_recursion`, :func:`transfer_function_xy`, :func:`spectral_matrix_xy`,
+:func:`coherence_from_spectral`, :func:`granger_causality_xy`
+
+The algorithms in this library are the functional form of the algorithms: they
+accept numpy arrays as inputs and produce numpy arrays as outputs. Therefore,
+they can be used on any type of data which can be represented in numpy arrays.
+See also :mod:`nitime.analysis` for simplified analysis interfaces, using the
+data containers implemented in :mod:`nitime.timeseries`.
+
+
+"""
+from nitime.algorithms.spectral import *
+from nitime.algorithms.cohere import *
+from nitime.algorithms.wavelet import *
+from nitime.algorithms.event_related import *
+from nitime.algorithms.autoregressive import *
+from nitime.algorithms.filter import *
+from nitime.algorithms.correlation import *
diff --git a/nitime/algorithms/autoregressive.py b/nitime/algorithms/autoregressive.py
new file mode 100644
index 0000000..019a59d
--- /dev/null
+++ b/nitime/algorithms/autoregressive.py
@@ -0,0 +1,525 @@
+r"""
+
+Autoregressive (AR) processes are processes of the form:
+
+.. math::
+
+ x(n) = a(1)x(n-1) + a(2)x(n-2) + ... + a(P)x(n-P) + e(n)
+
+where e(n) is a white noise process. The usage of 'e' suggests interpreting
+the linear combination of P past values of x(n) as the minimum mean square
+error linear predictor of x(n). Thus
+
+.. math::
+
+ e(n) = x(n) - a(1)x(n-1) - a(2)x(n-2) - ... - a(P)x(n-P)
+
+Due to whiteness, e(n) is also pointwise uncorrelated--i.e.,
+
+.. math::
+ :nowrap:
+
+ \begin{align*}
+ \text{(i)} && E\{e(n)e^{*}(n-m)\}& = \delta(n-m) &\\
+ \text{(ii)} && E\{e(n)x^{*}(m)\} & = 0 & m\neq n\\
+ \text{(iii)} && E\{|e|^{2}\} = E\{e(n)e^{*}(n)\} &= E\{e(n)x^{*}(n)\} &
+ \end{align*}
+
+These principles form the basis of the methods in this module for
+estimating the AR coefficients and the error/innovations power.
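+
+For example (a sketch for illustration only; the estimator functions are
+defined below in this module), the coefficients of a simulated AR(2) process
+can be recovered with :func:`AR_est_YW`::
+
+    import numpy as np
+    from nitime.algorithms.autoregressive import AR_est_YW
+
+    # simulate x(n) = 0.9 x(n-1) - 0.5 x(n-2) + e(n)
+    rng = np.random.RandomState(0)
+    e = rng.randn(2048)
+    x = np.zeros_like(e)
+    for n in range(2, len(x)):
+        x[n] = 0.9 * x[n - 1] - 0.5 * x[n - 2] + e[n]
+
+    ak, sigma_v = AR_est_YW(x, 2)   # ak is expected to be close to [0.9, -0.5]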
+"""
+
+
+import numpy as np
+from nitime.lazy import scipy_linalg as linalg
+
+import nitime.utils as utils
+from .spectral import freq_response
+
+
+def AR_est_YW(x, order, rxx=None):
+ r"""Determine the autoregressive (AR) model of a random process x using
+ the Yule Walker equations. The AR model takes this convention:
+
+ .. math::
+
+ x(n) = a(1)x(n-1) + a(2)x(n-2) + \dots + a(p)x(n-p) + e(n)
+
+ where e(n) is a zero-mean white noise process with variance sig_sq,
+ and p is the order of the AR model. This method returns the a_i and
+ sigma
+
+ The orthogonality property of minimum mean square error estimates
+ states that
+
+ .. math::
+
+ E\{e(n)x^{*}(n-k)\} = 0 \quad 1\leq k\leq p
+
+ Inserting the definition of the error signal into the equations above
+ yields the Yule Walker system of equations:
+
+ .. math::
+
+ R_{xx}(k) = \sum_{i=1}^{p}a(i)R_{xx}(k-i) \quad1\leq k\leq p
+
+ Similarly, the variance of the error process is
+
+ .. math::
+
+ E\{e(n)e^{*}(n)\} = E\{e(n)x^{*}(n)\} = R_{xx}(0)-\sum_{i=1}^{p}a(i)R^{*}(i)
+
+
+ Parameters
+ ----------
+ x : ndarray
+ The sampled autoregressive random process
+
+ order : int
+ The order p of the AR system
+
+ rxx : ndarray (optional)
+ An optional, possibly unbiased estimate of the autocorrelation of x
+
+ Returns
+ -------
+ ak, sig_sq : The estimated AR coefficients and innovations variance
+
+ """
+ if rxx is not None and type(rxx) == np.ndarray:
+ r_m = rxx[:order + 1]
+ else:
+ r_m = utils.autocorr(x)[:order + 1]
+
+ Tm = linalg.toeplitz(r_m[:order])
+ y = r_m[1:]
+ ak = linalg.solve(Tm, y)
+ sigma_v = r_m[0].real - np.dot(r_m[1:].conj(), ak).real
+ return ak, sigma_v
+
+
+def AR_est_LD(x, order, rxx=None):
+ r"""Levinson-Durbin algorithm for solving the Hermitian Toeplitz
+ system of Yule-Walker equations in the AR estimation problem
+
+ .. math::
+
+ T^{(p)}a^{(p)} = \gamma^{(p+1)}
+
+ where
+
+ .. math::
+ :nowrap:
+
+ \begin{align*}
+ T^{(p)} &= \begin{pmatrix}
+ R_{0} & R_{1}^{*} & \cdots & R_{p-1}^{*}\\
+ R_{1} & R_{0} & \cdots & R_{p-2}^{*}\\
+ \vdots & \vdots & \ddots & \vdots\\
+ R_{p-1}^{*} & R_{p-2}^{*} & \cdots & R_{0}
+ \end{pmatrix}\\
+ a^{(p)} &=\begin{pmatrix} a_1 & a_2 & \cdots a_p \end{pmatrix}^{T}\\
+ \gamma^{(p+1)}&=\begin{pmatrix}R_1 & R_2 & \cdots & R_p \end{pmatrix}^{T}
+ \end{align*}
+
+ and :math:`R_k` is the autocorrelation of the kth lag
+
+ Parameters
+ ----------
+
+ x : ndarray
+ the zero-mean stochastic process
+ order : int
+ the AR model order--IE the rank of the system.
+ rxx : ndarray, optional
+ (at least) order+1 samples of the autocorrelation sequence
+
+ Returns
+ -------
+
+ ak, sig_sq
+ The AR coefficients for 1 <= k <= p, and the variance of the
+ driving white noise process
+
+ """
+
+ if rxx is not None and type(rxx) == np.ndarray:
+ rxx_m = rxx[:order + 1]
+ else:
+ rxx_m = utils.autocorr(x)[:order + 1]
+ w = np.zeros((order + 1, ), rxx_m.dtype)
+ # intialize the recursion with the R[0]w[1]=r[1] solution (p=1)
+ b = rxx_m[0].real
+ w_k = rxx_m[1] / b
+ w[1] = w_k
+ p = 2
+ while p <= order:
+ b *= 1 - (w_k * w_k.conj()).real
+ w_k = (rxx_m[p] - (w[1:p] * rxx_m[1:p][::-1]).sum()) / b
+ # update w_k from k=1,2,...,p-1
+ # with a correction from w*_i i=p-1,p-2,...,1
+ w[1:p] = w[1:p] - w_k * w[1:p][::-1].conj()
+ w[p] = w_k
+ p += 1
+ b *= 1 - (w_k * w_k.conj()).real
+ return w[1:], b
+
+
+def lwr_recursion(r):
+ r"""Perform a Levinson-Wiggins[Whittle]-Robinson recursion to
+ find the coefficients a(i) that satisfy the matrix version
+ of the Yule-Walker system of P + 1 equations:
+
+ sum_{i=0}^{P} a(i)r(k-i) = 0, for k = {1,2,...,P}
+
+ with the additional equation
+
+ sum_{i=0}^{P} a(i)r(-k) = V
+
+ where V is the covariance matrix of the innovations process,
+ and a(0) is fixed at the identity matrix
+
+ Also note that r is defined as:
+
+ r(k) = E{ X(t)X*(t-k) } ( * = conjugate transpose )
+ r(-k) = r*(k)
+
+
+ This routine adapts the algorithm found in eqs (1)-(11)
+ in Morf, Vieira, Kailath 1978
+
+ Parameters
+ ----------
+
+ r : ndarray, shape (P + 1, nc, nc)
+
+ Returns
+ -------
+
+ a : ndarray (P,nc,nc)
+ coefficient sequence of order P
+ sigma : ndarray (nc,nc)
+ covariance estimate
+
+ """
+
+ # r is (P+1, nc, nc)
+ nc = r.shape[1]
+ P = r.shape[0] - 1
+
+ a = np.zeros((P, nc, nc)) # ar coefs
+ b = np.zeros_like(a) # lp coefs
+ sigb = np.zeros_like(r[0]) # forward prediction error covariance
+ sigf = np.zeros_like(r[0]) # backward prediction error covariance
+ delta = np.zeros_like(r[0])
+
+ # initialize
+ idnt = np.eye(nc)
+ sigf[:] = r[0]
+ sigb[:] = r[0]
+
+ # iteratively find sequences A_{p+1}(i) and B_{p+1}(i)
+ for p in range(P):
+
+ # calculate delta_{p+1}
+ # delta_{p+1} = r(p+1) + sum_{i=1}^{p} a(i)r(p+1-i)
+ delta[:] = r[p + 1]
+ for i in range(1, p + 1):
+ delta += np.dot(a[i - 1], r[p + 1 - i])
+
+ # intermediate values XXX: should turn these into solution-problems
+ ka = np.dot(delta, linalg.inv(sigb))
+ kb = np.dot(delta.conj().T, linalg.inv(sigf))
+
+ # store a_{p} before updating sequence to a_{p+1}
+ ao = a.copy()
+ # a_{p+1}(i) = a_{p}(i) - ka*b_{p}(p+1-i) for i in {1,2,...,p}
+ # b_{p+1}(i) = b_{p}(i) - kb*a_{p}(p+1-i) for i in {1,2,...,p}
+ for i in range(1, p + 1):
+ a[i - 1] -= np.dot(ka, b[p - i])
+ for i in range(1, p + 1):
+ b[i - 1] -= np.dot(kb, ao[p - i])
+
+ a[p] = -ka
+ b[p] = -kb
+
+ sigf = np.dot(idnt - np.dot(ka, kb), sigf)
+ sigb = np.dot(idnt - np.dot(kb, ka), sigb)
+
+ return a, sigf
+
+
+def MAR_est_LWR(x, order, rxx=None):
+ r"""
+ MAR estimation, using the LWR algorithm, as in Morf et al.
+
+
+ Parameters
+ ----------
+ x : ndarray
+ The sampled autoregressive random process
+
+ order : int
+ The order P of the AR system
+
+ rxx : ndarray (optional)
+ An optional, possibly unbiased estimate of the autocovariance of x
+
+ Returns
+ -------
+ a, ecov : The system coefficients and the estimated covariance
+ """
+ Rxx = utils.autocov_vector(x, nlags=order)
+ a, ecov = lwr_recursion(Rxx.transpose(2, 0, 1))
+ return a, ecov
+
+
+def AR_psd(ak, sigma_v, n_freqs=1024, sides='onesided'):
+ r"""
+ Compute the PSD of an AR process, based on the process coefficients and
+ covariance
+
+ n_freqs : int
+ The number of spacings on the frequency grid from [-PI,PI).
+ If sides=='onesided', n_freqs/2+1 frequencies are computed from [0,PI]
+
+ sides : str (optional)
+ Indicates whether to return a one-sided or two-sided PSD
+
+ Returns
+ -------
+ (w, ar_psd)
+ w : Array of normalized frequencies from [-.5, .5) or [0,.5]
+ ar_psd : A PSD estimate computed by sigma_v / |1-a(f)|**2 , where
+ a(f) = DTFT(ak)
+
+
+ """
+ # compute the psd as |H(f)|**2, where H(f) is the transfer function
+ # for this model s[n] = a1*s[n-1] + a2*s[n-2] + ... aP*s[n-P] + v[n]
+ # Taken as a IIR system with unit-variance white noise input e[n]
+ # and output s[n],
+ # b0*e[n] = w0*s[n] + w1*s[n-1] + w2*s[n-2] + ... + wP*s[n-P],
+ # where b0 = sqrt(VAR{v[n]}), w0 = 1, and wk = -ak for k>0
+ # the transfer function here is H(f) = DTFT(w)
+ # leading to Sxx(f)/Exx(f) = |H(f)|**2 = VAR{v[n]} / |W(f)|**2
+ w, hw = freq_response(sigma_v ** 0.5, a=np.r_[1, -ak],
+ n_freqs=n_freqs, sides=sides)
+ ar_psd = (hw * hw.conj()).real
+ return (w, 2 * ar_psd) if sides == 'onesided' else (w, ar_psd)
+
+
+#-----------------------------------------------------------------------------
+# Granger causality analysis
+#-----------------------------------------------------------------------------
+def transfer_function_xy(a, n_freqs=1024):
+ r"""Helper routine to compute the transfer function H(w) based
+ on sequence of coefficient matrices A(i). The z transforms
+ follow from this definition:
+
+ X[t] + sum_{k=1}^P a[k]X[t-k] = Err[t]
+
+ Parameters
+ ----------
+
+ a : ndarray, shape (P, 2, 2)
+ sequence of coef matrices describing an mAR process
+ n_freqs : int, optional
+ number of frequencies to compute in range [0,PI]
+
+ Returns
+ -------
+
+ Hw : ndarray
+ The transfer function from innovations process vector to
+ mAR process X
+
+ """
+ # these concatenations follow from the observation that A(0) is
+ # implicitly the identity matrix
+ ai = np.r_[1, a[:, 0, 0]]
+ bi = np.r_[0, a[:, 0, 1]]
+ ci = np.r_[0, a[:, 1, 0]]
+ di = np.r_[1, a[:, 1, 1]]
+
+ # compute A(w) such that A(w)X(w) = Err(w)
+ w, aw = freq_response(ai, n_freqs=n_freqs)
+ _, bw = freq_response(bi, n_freqs=n_freqs)
+ _, cw = freq_response(ci, n_freqs=n_freqs)
+ _, dw = freq_response(di, n_freqs=n_freqs)
+
+ #A = np.array([ [1-aw, -bw], [-cw, 1-dw] ])
+ A = np.array([[aw, bw], [cw, dw]])
+ # compute the transfer function from Err to X. Since Err(w) is 1(w),
+ # the transfer function H(w) = A^(-1)(w)
+ # (use 2x2 matrix shortcut)
+ detA = (A[0, 0] * A[1, 1] - A[0, 1] * A[1, 0])
+ Hw = np.array([[dw, -bw], [-cw, aw]])
+ Hw /= detA
+ return w, Hw
+
+
+def spectral_matrix_xy(Hw, cov):
+ r"""Compute the spectral matrix S(w), from the convention:
+
+ X[t] + sum_{k=1}^P a[k]X[t-k] = Err[t]
+
+ The formulation follows from Ding, Chen, Bressler 2008,
+ pg 6 eqs (11) to (15)
+
+ The transfer function H(w) should be computed first from
+ transfer_function_xy()
+
+ Parameters
+ ----------
+
+ Hw : ndarray (2, 2, n_freqs)
+ Pre-computed transfer function from transfer_function_xy()
+
+ cov : ndarray (2, 2)
+ The covariance between innovations processes in Err[t]
+
+ Returns
+ -------
+
+ Sw : ndarrays
+ matrix of spectral density functions
+ """
+
+ nw = Hw.shape[-1]
+ # now compute spectral density function estimate
+ # S(w) = H(w)SigH*(w)
+ Sw = np.empty((2, 2, nw), 'D')
+
+ # do a shortcut for 2x2:
+ # compute T(w) = SigH*(w)
+ # t00 = Sig[0,0] * H*_00(w) + Sig[0,1] * H*_10(w)
+ t00 = cov[0, 0] * Hw[0, 0].conj() + cov[0, 1] * Hw[0, 1].conj()
+ # t01 = Sig[0,0] * H*_01(w) + Sig[0,1] * H*_11(w)
+ t01 = cov[0, 0] * Hw[1, 0].conj() + cov[0, 1] * Hw[1, 1].conj()
+ # t10 = Sig[1,0] * H*_00(w) + Sig[1,1] * H*_10(w)
+ t10 = cov[1, 0] * Hw[0, 0].conj() + cov[1, 1] * Hw[0, 1].conj()
+ # t11 = Sig[1,0] * H*_01(w) + Sig[1,1] * H*_11(w)
+ t11 = cov[1, 0] * Hw[1, 0].conj() + cov[1, 1] * Hw[1, 1].conj()
+
+ # now S(w) = H(w)T(w)
+ Sw[0, 0] = Hw[0, 0] * t00 + Hw[0, 1] * t10
+ Sw[0, 1] = Hw[0, 0] * t01 + Hw[0, 1] * t11
+ Sw[1, 0] = Hw[1, 0] * t00 + Hw[1, 1] * t10
+ Sw[1, 1] = Hw[1, 0] * t01 + Hw[1, 1] * t11
+
+ return Sw
+
+
+def coherence_from_spectral(Sw):
+ r"""Compute the spectral coherence between processes X and Y,
+ given their spectral matrix S(w)
+
+ Parameters
+ ----------
+
+ Sw : ndarray
+ spectral matrix
+ """
+
+ Sxx = Sw[0, 0].real
+ Syy = Sw[1, 1].real
+
+ Sxy_mod_sq = (Sw[0, 1] * Sw[1, 0]).real
+ Sxy_mod_sq /= Sxx
+ Sxy_mod_sq /= Syy
+ return Sxy_mod_sq
+
+
+def interdependence_xy(Sw):
+ r"""Compute the 'total interdependence' between processes X and Y,
+ given their spectral matrix S(w)
+
+ Parameters
+ ----------
+
+ Sw : ndarray
+ spectral matrix
+
+ Returns
+ -------
+
+ fxy(w)
+ interdependence function of frequency
+ """
+
+ Cw = coherence_from_spectral(Sw)
+ return -np.log(1 - Cw)
+
+
+def granger_causality_xy(a, cov, n_freqs=1024):
+ r"""Compute the Granger causality between processes X and Y, which
+ are linked in a multivariate autoregressive (mAR) model parameterized
+ by coefficient matrices a(i) and the innovations covariance matrix
+
+ X[t] + sum_{k=1}^P a[k]X[t-k] = Err[t]
+
+ Parameters
+ ----------
+
+ a : ndarray, (P,2,2)
+ coefficient matrices characterizing the autoregressive mixing
+ cov : ndarray, (2,2)
+ covariance matrix characterizing the innovations vector
+ n_freqs: int
+ number of frequencies to compute in the Fourier transform
+
+ Returns
+ -------
+
+ w, f_x_on_y, f_y_on_x, f_xy, Sw
+ 1) vector of frequencies
+ 2) function of the Granger causality of X on Y
+ 3) function of the Granger causality of Y on X
+ 4) function of the 'instantaneous causality' between X and Y
+ 5) spectral density matrix
+ """
+
+ w, Hw = transfer_function_xy(a, n_freqs=n_freqs)
+
+ sigma = cov[0, 0]
+ upsilon = cov[0, 1]
+ gamma = cov[1, 1]
+
+ # this transformation of the transfer functions computes the
+ # Granger causality of Y on X
+ gamma2 = gamma - upsilon ** 2 / sigma
+
+ Hxy = Hw[0, 1]
+ Hxx_hat = Hw[0, 0] + (upsilon / sigma) * Hxy
+
+ xx_auto_component = (sigma * Hxx_hat * Hxx_hat.conj()).real
+ cross_component = gamma2 * Hxy * Hxy.conj()
+ Sxx = xx_auto_component + cross_component
+ f_y_on_x = np.log(Sxx.real / xx_auto_component)
+
+ # this transformation computes the Granger causality of X on Y
+ sigma2 = sigma - upsilon ** 2 / gamma
+
+ Hyx = Hw[1, 0]
+ Hyy_hat = Hw[1, 1] + (upsilon / gamma) * Hyx
+ yy_auto_component = (gamma * Hyy_hat * Hyy_hat.conj()).real
+ cross_component = sigma2 * Hyx * Hyx.conj()
+ Syy = yy_auto_component + cross_component
+ f_x_on_y = np.log(Syy.real / yy_auto_component)
+
+ # now compute cross densities, using the latest transformation
+ Hxx = Hw[0, 0]
+ Hyx = Hw[1, 0]
+ Hxy_hat = Hw[0, 1] + (upsilon / gamma) * Hxx
+ Sxy = sigma2 * Hxx * Hyx.conj() + gamma * Hxy_hat * Hyy_hat.conj()
+ Syx = sigma2 * Hyx * Hxx.conj() + gamma * Hyy_hat * Hxy_hat.conj()
+
+ # can safely throw away imaginary part
+ # since Sxx and Syy are real, and Sxy == Syx*
+ detS = (Sxx * Syy - Sxy * Syx).real
+ f_xy = xx_auto_component * yy_auto_component
+ f_xy /= detS
+ f_xy = np.log(f_xy)
+
+ return w, f_x_on_y, f_y_on_x, f_xy, np.array([[Sxx, Sxy], [Syx, Syy]])
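+
+# Usage sketch (illustrative only): with `a` (P, 2, 2) and `ecov` (2, 2) as
+# estimated, e.g., by MAR_est_LWR for a bivariate process, the
+# frequency-resolved causality terms follow as:
+#
+#   w, f_x_on_y, f_y_on_x, f_xy, Sw = granger_causality_xy(a, ecov,
+#                                                          n_freqs=512)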
diff --git a/nitime/algorithms/cohere.py b/nitime/algorithms/cohere.py
new file mode 100644
index 0000000..300aa2d
--- /dev/null
+++ b/nitime/algorithms/cohere.py
@@ -0,0 +1,1287 @@
+"""
+
+Coherency is an analogue of correlation, calculated in the frequency
+domain. This is a useful quantity for describing a system of oscillators
+coupled with delay. This is because the coherency captures not only the
+magnitude of the time-shift-independent correlation between the time-series
+(termed 'coherence'), but can also be used in order to estimate the size of the
+time-delay (the phase-delay between the time-series in a particular frequency
+band).
+
+"""
+
+import numpy as np
+from nitime.lazy import scipy_fftpack as fftpack
+from nitime.lazy import matplotlib_mlab as mlab
+
+from .spectral import get_spectra, get_spectra_bi
+import nitime.utils as utils
+
+# To support older versions of numpy that don't have tril_indices:
+from nitime.index_utils import tril_indices
+
+
+def coherency(time_series, csd_method=None):
+ r"""
+ Compute the coherency between the spectra of n-tuple of time series.
+ Input to this function is in the time domain
+
+ Parameters
+ ----------
+
+ time_series : n*t float array
+ an array of n different time series of length t each
+
+ csd_method : dict, optional.
+ See :func:`get_spectra` documentation for details
+
+ Returns
+ -------
+
+ f : float array
+ The central frequencies for the frequency bands for which the spectra
+ are estimated
+
+ c : float array
+ This is a symmetric matrix with the coherencys of the signals. The
+ coherency of signal i and signal j is in f[i][j]. Note that f[i][j] =
+ f[j][i].conj()
+
+ Notes
+ -----
+
+ This is an implementation of equation (1) of Sun (2005):
+
+ .. math::
+
+ R_{xy} (\lambda) = \frac{f_{xy}(\lambda)}
+ {\sqrt{f_{xx} (\lambda) \cdot f_{yy}(\lambda)}}
+
+ F.T. Sun and L.M. Miller and M. D'Esposito (2005). Measuring temporal
+ dynamics of functional networks using phase spectrum of fMRI
+ data. Neuroimage, 28: 227-37.
+
+ """
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ #A container for the coherencys, with the size and shape of the expected
+ #output:
+ c = np.zeros((time_series.shape[0],
+ time_series.shape[0],
+ f.shape[0]), dtype=complex) # Make sure it's complex
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ c[i][j] = coherency_spec(fxy[i][j], fxy[i][i], fxy[j][j])
+
+ idx = tril_indices(time_series.shape[0], -1)
+ c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
+
+ return f, c
+
+
+def coherency_spec(fxy, fxx, fyy):
+ r"""
+ Compute the coherency between the spectra of two time series.
+
+ Input to this function is in the frequency domain.
+
+ Parameters
+ ----------
+
+ fxy : float array
+ The cross-spectrum of the time series
+
+ fyy,fxx : float array
+ The spectra of the signals
+
+ Returns
+ -------
+
+ complex array
+ the frequency-band-dependent coherency
+
+ See also
+ --------
+ :func:`coherency`
+ """
+
+ return fxy / np.sqrt(fxx * fyy)
+
+
+def coherence(time_series, csd_method=None):
+ r"""Compute the coherence between the spectra of an n-tuple of time_series.
+
+ Parameters of this function are in the time domain.
+
+ Parameters
+ ----------
+ time_series : float array
+ an array of different time series with time as the last dimension
+
+ csd_method : dict, optional
+ See :func:`algorithms.spectral.get_spectra` documentation for details
+
+ Returns
+ -------
+ f : float array
+ The central frequencies for the frequency bands for which the spectra
+ are estimated
+
+ c : float array
+ This is a symmetric matrix with the coherences of the signals. The
+ coherence of signal i and signal j is in c[i][j].
+
+ Notes
+ -----
+
+ This is an implementation of equation (2) of Sun (2005):
+
+ .. math::
+
+ Coh_{xy}(\lambda) = |{R_{xy}(\lambda)}|^2 =
+ \frac{|{f_{xy}(\lambda)}|^2}{f_{xx}(\lambda) \cdot f_{yy}(\lambda)}
+
+ F.T. Sun and L.M. Miller and M. D'Esposito (2005). Measuring temporal
+ dynamics of functional networks using phase spectrum of fMRI data.
+ Neuroimage, 28: 227-37.
+
+ """
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ # A container for the coherences, with the size and shape of the expected
+ # output:
+ c = np.zeros((time_series.shape[0],
+ time_series.shape[0],
+ f.shape[0]))
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ c[i][j] = coherence_spec(fxy[i][j], fxy[i][i], fxy[j][j])
+
+ idx = tril_indices(time_series.shape[0], -1)
+ c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
+
+ return f, c
+
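+# Usage sketch (illustrative only; `ts` is an assumed (n, t) array of n time
+# series):
+#
+#   welch = {'this_method': 'welch', 'NFFT': 128, 'Fs': 2 * np.pi}
+#   f, c = coherence(ts, csd_method=welch)
+#   # c[i, j] holds the coherence of series i and j at each frequency in f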
+
+def coherence_spec(fxy, fxx, fyy):
+ r"""
+ Compute the coherence between the spectra of two time series.
+
+ Parameters of this function are in the frequency domain.
+
+ Parameters
+ ----------
+
+ fxy : array
+ The cross-spectrum of the time series
+
+ fyy, fxx : array
+ The spectra of the signals
+
+ Returns
+ -------
+
+ float : a frequency-band-dependent measure of the linear association
+ between the two time series
+
+ See also
+ --------
+ :func:`coherence`
+ """
+ if not np.isrealobj(fxx):
+ fxx = np.real(fxx)
+ if not np.isrealobj(fyy):
+ fyy = np.real(fyy)
+ c = np.abs(fxy) ** 2 / (fxx * fyy)
+ return c
+
+
+def coherency_regularized(time_series, epsilon, alpha, csd_method=None):
+ r"""
+ Compute a regularized measure of the coherence.
+
+ Regularization may be needed in order to overcome numerical imprecisions
+
+ Parameters
+ ----------
+
+ time_series: float array
+ The time series data for which the regularized coherence is
+ calculated. Time as the last dimension.
+
+ epsilon: float
+ Small regularization parameter. Should be much smaller than any
+ meaningful value of coherence you might encounter
+
+ alpha: float
+ Large regularization parameter. Should be much larger than any
+ meaningful value of coherence you might encounter (preferably much
+ larger than 1).
+
+ csd_method: dict, optional.
+ See :func:`get_spectra` documentation for details
+
+ Returns
+ -------
+ f: float array
+ The central frequencies for the frequency bands for which the spectra
+ are estimated
+
+ c: float array
+ This is a symmetric matrix with the coherencies of the signals. The
+ coherency of signal i and signal j is in c[i][j]. Note that c[i][j] =
+ c[j][i].conj()
+
+
+ Notes
+ -----
+ The regularization scheme is as follows:
+
+ .. math::
+
+ R_{xy}^R (\lambda) = \frac{\alpha f_{xy} + \epsilon}
+ {\sqrt{\alpha^{2}(f_{xx}+\epsilon)(f_{yy}+\epsilon)}}
+
+
+ """
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ # A container for the coherences, with the size and shape of the expected
+ # output:
+ c = np.zeros((time_series.shape[0],
+ time_series.shape[0],
+ f.shape[0]), dtype=complex) # Make sure it's complex
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ c[i][j] = _coherency_reqularized(fxy[i][j], fxy[i][i],
+ fxy[j][j], epsilon, alpha)
+
+ idx = tril_indices(time_series.shape[0], -1)
+ c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
+
+ return f, c
+
+
+def _coherency_reqularized(fxy, fxx, fyy, epsilon, alpha):
+
+ r"""
+ A regularized version of the calculation of coherency, which is more
+ robust to numerical noise than the standard calculation
+
+ Input to this function is in the frequency domain.
+
+ Parameters
+ ----------
+
+ fxy, fxx, fyy: float arrays
+ The cross- and power-spectral densities of the two signals x and y
+
+ epsilon: float
+ First regularization parameter. Should be much smaller than any
+ meaningful value of coherence you might encounter
+
+ alpha: float
+ Second regularization parameter. Should be much larger than any
+ meaningful value of coherence you might encounter (preferably much
+ larger than 1).
+
+ Returns
+ -------
+ float array
+ The coherence values
+
+ """
+
+ return (((alpha * fxy + epsilon)) /
+ np.sqrt(((alpha ** 2) * (fxx + epsilon) * (fyy + epsilon))))
+
+
+def coherence_regularized(time_series, epsilon, alpha, csd_method=None):
+ r"""
+ Same as coherence, except regularized in order to overcome numerical
+ imprecisions
+
+ Parameters
+ ----------
+
+ time_series: n-d float array
+ The time series data for which the regularized coherence is calculated
+
+ epsilon: float
+ Small regularization parameter. Should be much smaller than any
+ meaningful value of coherence you might encounter
+
+ alpha: float
+ large regularization parameter. Should be much larger than any
+ meaningful value of coherence you might encounter (preferably much
+ larger than 1).
+
+ csd_method: dict, optional.
+ See :func:`get_spectra` documentation for details
+
+ Returns
+ -------
+ f: float array
+ The central frequencies for the frequency bands for which the spectra
+ are estimated
+
+ c: n-d array
+ This is a symmetric matrix with the coherences of the signals. The
+ coherence of signal i and signal j is in c[i][j].
+
+ Notes
+ -----
+ The regularization scheme is as follows:
+
+ .. math::
+
+ Coh_{xy}^R = \frac{(\alpha |f_{xy}| + \epsilon)^2}
+ {\alpha^{2}(f_{xx}+\epsilon)(f_{yy}+\epsilon)}
+
+ """
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ #A container for the coherences, with the size and shape of the expected
+ #output:
+ c = np.zeros((time_series.shape[0],
+ time_series.shape[0],
+ f.shape[0]), complex)
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ c[i][j] = _coherence_reqularized(fxy[i][j], fxy[i][i],
+ fxy[j][j], epsilon, alpha)
+
+ idx = tril_indices(time_series.shape[0], -1)
+ c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
+
+ return f, c
+
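+# Usage sketch (illustrative only; `ts` is an assumed (n, t) array): epsilon
+# should be tiny and alpha large relative to any plausible coherence value,
+# e.g.:
+#
+#   f, c = coherence_regularized(ts, epsilon=1e-10, alpha=1e10)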
+
+def _coherence_reqularized(fxy, fxx, fyy, epsilon, alpha):
+
+ r"""A regularized version of the calculation of coherence, which is more
+ robust to numerical noise than the standard calculation.
+
+ Input to this function is in the frequency domain
+
+ Parameters
+ ----------
+
+ fxy, fxx, fyy: float arrays
+ The cross- and power-spectral densities of the two signals x and y
+
+ epsilon: float
+ First regularization parameter. Should be much smaller than any
+ meaningful value of coherence you might encounter
+
+ alpha: float
+ Second regularization parameter. Should be much larger than any
+ meaningful value of coherence you might encounter (preferably much
+ larger than 1)
+
+ Returns
+ -------
+ float array
+ The coherence values
+
+ """
+ return (((alpha * np.abs(fxy) + epsilon) ** 2) /
+ ((alpha ** 2) * (fxx + epsilon) * (fyy + epsilon)))
+
+
+def coherency_bavg(time_series, lb=0, ub=None, csd_method=None):
+ r"""
+ Compute the band-averaged coherency between the spectra of two time series.
+
+ Input to this function is in the time domain.
+
+ Parameters
+ ----------
+ time_series: n*t float array
+ an array of n different time series of length t each
+
+ lb, ub: float, optional
+ The lower and upper bounds of the frequency band to be used in
+ averaging. Default to the first frequency band above 0 and to max(f)
+
+ csd_method: dict, optional.
+ See :func:`get_spectra` documentation for details
+
+ Returns
+ -------
+ c: float array
+ This is an upper-diagonal array, where c[i][j] is the band-averaged
+ coherency between time_series[i] and time_series[j]
+
+ Notes
+ -----
+
+ This is an implementation of equation (A4) of Sun(2005):
+
+ .. math::
+
+ \bar{Coh_{xy}} (\bar{\lambda}) =
+ \frac{\left|{\sum_\lambda{\hat{f_{xy}}}}\right|^2}
+ {\sum_\lambda{\hat{f_{xx}}}\cdot \sum_\lambda{\hat{f_{yy}}}}
+
+ F.T. Sun and L.M. Miller and M. D'Esposito (2005). Measuring
+ temporal dynamics of functional networks using phase spectrum of fMRI
+ data. Neuroimage, 28: 227-37.
+ """
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ lb_idx, ub_idx = utils.get_bounds(f, lb, ub)
+
+ if lb == 0:
+ lb_idx = 1 # The lowest frequency band should be f0
+
+ c = np.zeros((time_series.shape[0],
+ time_series.shape[0]), dtype=complex)
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ c[i][j] = _coherency_bavg(fxy[i][j][lb_idx:ub_idx],
+ fxy[i][i][lb_idx:ub_idx],
+ fxy[j][j][lb_idx:ub_idx])
+
+ idx = tril_indices(time_series.shape[0], -1)
+ c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
+
+ return c
+
+
+def _coherency_bavg(fxy, fxx, fyy):
+ r"""
+ Compute the band-averaged coherency between the spectra of two time series.
+
+ Input to this function is in the frequency domain.
+
+ Parameters
+ ----------
+
+ fxy : float array
+ The cross-spectrum of the time series
+
+ fyy,fxx : float array
+ The spectra of the signals
+
+ Returns
+ -------
+
+ float
+ the band-averaged coherency
+
+ Notes
+ -----
+
+ This is an implementation of equation (A4) of [Sun2005]_:
+
+ .. math::
+
+ \bar{Coh_{xy}} (\bar{\lambda}) =
+ \frac{\left|{\sum_\lambda{\hat{f_{xy}}}}\right|^2}
+ {\sum_\lambda{\hat{f_{xx}}}\cdot \sum_\lambda{\hat{f_{yy}}}}
+
+ .. [Sun2005] F.T. Sun and L.M. Miller and M. D'Esposito(2005). Measuring
+ temporal dynamics of functional networks using phase spectrum of fMRI
+ data. Neuroimage, 28: 227-37.
+ """
+
+ # Average the phases and the magnitudes separately and then recombine:
+
+ p = np.angle(fxy)
+ p_bavg = np.mean(p)
+
+ m = np.abs(coherency_spec(fxy, fxx, fyy))
+ m_bavg = np.mean(m)
+
+ # Recombine according to z = r(cos(phi)+sin(phi)i):
+ return m_bavg * (np.cos(p_bavg) + np.sin(p_bavg) * 1j)
+
+
+def coherence_bavg(time_series, lb=0, ub=None, csd_method=None):
+ r"""
+ Compute the band-averaged coherence between the spectra of two time series.
+
+ Input to this function is in the time domain.
+
+ Parameters
+ ----------
+ time_series : float array
+ An array of time series, time as the last dimension.
+
+ lb, ub: float, optional
+ The lower and upper bounds of the frequency band to be used in
+ averaging. Default to the first frequency band above 0 and to max(f)
+
+ csd_method: dict, optional.
+ See :func:`get_spectra` documentation for details
+
+ Returns
+ -------
+ c : float array
+ This is a symmetric array, where c[i][j] is the band-averaged
+ coherence between time_series[i] and time_series[j]
+ """
+
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ lb_idx, ub_idx = utils.get_bounds(f, lb, ub)
+
+ if lb == 0:
+ lb_idx = 1 # The lowest frequency band should be f0
+
+ c = np.zeros((time_series.shape[0],
+ time_series.shape[0]))
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ c[i][j] = _coherence_bavg(fxy[i][j][lb_idx:ub_idx],
+ fxy[i][i][lb_idx:ub_idx],
+ fxy[j][j][lb_idx:ub_idx])
+
+ idx = tril_indices(time_series.shape[0], -1)
+ c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
+
+ return c
+
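+# Usage sketch (illustrative only; `ts` is an assumed (n, t) array sampled at
+# 1 Hz): average the coherence over the 0.1-0.4 Hz band:
+#
+#   c = coherence_bavg(ts, lb=0.1, ub=0.4,
+#                      csd_method={'this_method': 'welch', 'Fs': 1.0})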
+
+def _coherence_bavg(fxy, fxx, fyy):
+ r"""
+ Compute the band-averaged coherence between the spectra of two time series.
+
+ Input to this function is in the frequency domain.
+
+ Parameters
+ ----------
+
+ fxy : float array
+ The cross-spectrum of the time series
+
+ fyy,fxx : float array
+ The spectra of the signals
+
+ Returns
+ -------
+
+ float :
+ the band-averaged coherence
+ """
+ if not np.isrealobj(fxx):
+ fxx = np.real(fxx)
+ if not np.isrealobj(fyy):
+ fyy = np.real(fyy)
+
+ return (np.abs(fxy.sum()) ** 2) / (fxx.sum() * fyy.sum())
+
+
+def coherence_partial(time_series, r, csd_method=None):
+ r"""
+ Compute the band-specific partial coherence between the spectra of
+ two time series.
+
+ The partial coherence is the part of the coherence between x and
+ y, which cannot be attributed to a common cause, r.
+
+ Input to this function is in the time domain.
+
+ Parameters
+ ----------
+
+ time_series: float array
+ An array of time-series, with time as the last dimension.
+
+ r: float array
+ This array represents the temporal sequence of the common cause to be
+ partialed out, sampled at the same rate as time_series
+
+ csd_method: dict, optional
+ See :func:`get_spectra` documentation for details
+
+
+ Returns
+ -------
+ f: array,
+ The mid-frequencies of the frequency bands in the spectral
+ decomposition
+
+ c: float array
+ The frequency dependent partial coherence between time_series i and
+ time_series j in c[i][j] and in c[j][i], with r partialed out
+
+
+ Notes
+ -----
+
+ This is an implementation of equation (2) of Sun (2004):
+
+ .. math::
+
+ Coh_{xy|r} = \frac{|{R_{xy}(\lambda) - R_{xr}(\lambda)
+ R_{ry}(\lambda)}|^2}{(1-|{R_{xr}}|^2)(1-|{R_{ry}}|^2)}
+
+ F.T. Sun and L.M. Miller and M. D'Esposito (2004). Measuring interregional
+ functional connectivity using coherence and partial coherence analyses of
+ fMRI data Neuroimage, 21: 647-58.
+ """
+
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ # Initialize c according to the size of f:
+ c = np.zeros((time_series.shape[0],
+ time_series.shape[0],
+ f.shape[0]), dtype=complex)
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ f, fxx, frr, frx = get_spectra_bi(time_series[i], r, csd_method)
+ f, fyy, frr, fry = get_spectra_bi(time_series[j], r, csd_method)
+ c[i, j] = coherence_partial_spec(fxy[i][j], fxy[i][i],
+ fxy[j][j], frx, fry, frr)
+
+ idx = tril_indices(time_series.shape[0], -1)
+ c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
+
+ return f, c
+
+
+def coherence_partial_spec(fxy, fxx, fyy, fxr, fry, frr):
+ r"""
+ Compute the band-specific partial coherence between the spectra of
+ two time series. See :func:`partial_coherence`.
+
+ Input to this function is in the frequency domain.
+
+ Parameters
+ ----------
+ fxy : float array
+ The cross-spectrum of the time series
+
+ fyy, fxx : float array
+ The spectra of the signals
+
+ fxr, fry : float array
+ The cross-spectra of the signals with the event
+
+ Returns
+ -------
+ float
+ The band-specific partial coherence
+ """
+ coh = coherency_spec
+ Rxr = coh(fxr, fxx, frr)
+ Rry = coh(fry, fyy, frr)
+ Rxy = coh(fxy, fxx, fyy)
+
+ return (((np.abs(Rxy - Rxr * Rry)) ** 2) /
+ ((1 - ((np.abs(Rxr)) ** 2)) * (1 - ((np.abs(Rry)) ** 2))))
+
+
+def coherency_phase_spectrum(time_series, csd_method=None):
+ r"""
+ Compute the phase spectrum of the cross-spectrum between two time series.
+
+ The parameters of this function are in the time domain.
+
+ Parameters
+ ----------
+
+ time_series : n*t float array
+ The time series, with t, time, as the last dimension
+
+ Returns
+ -------
+
+ f : float array
+ The mid frequencies of the bands
+
+ p : float array
+ An array with the pairwise phase spectrum between the time
+ series, where p[i][j] is the phase spectrum between time_series[i] and
+ time_series[j]
+
+ Notes
+ -----
+
+ This is an implementation of equation (3) of Sun et al. (2005) [Sun2005]_:
+
+ .. math::
+
+ \phi(\lambda) = arg [R_{xy} (\lambda)] = arg [f_{xy} (\lambda)]
+
+ F.T. Sun and L.M. Miller and M. D'Esposito (2005). Measuring temporal
+ dynamics of functional networks using phase spectrum of fMRI data.
+ Neuroimage, 28: 227-37.
+ """
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ p = np.zeros((time_series.shape[0],
+ time_series.shape[0],
+ f.shape[0]))
+
+ for i in range(time_series.shape[0]):
+ for j in range(i + 1, time_series.shape[0]):
+ p[i][j] = np.angle(fxy[i][j])
+ p[j][i] = np.angle(fxy[i][j].conjugate())
+
+ return f, p
+
+
+def coherency_phase_delay(time_series, lb=0, ub=None, csd_method=None):
+ """
+ The temporal delay calculated from the coherency phase spectrum.
+
+ Parameters
+ ----------
+
+ time_series: float array
+ The time-series data for which the delay is calculated.
+
+ lb, ub: float
+ Frequency boundaries (in Hz), for the domain over which the delays are
+ calculated. Defaults to 0-max(f)
+
+ csd_method : dict, optional.
+ See :func:`get_spectra`
+
+ Returns
+ -------
+ f : float array
+ The mid-frequencies for the frequency bands over which the calculation
+ is done.
+ p : float array
+ Pairwise temporal delays between time-series (in seconds).
+
+ """
+ if csd_method is None:
+ csd_method = {'this_method': 'welch'} # The default
+
+ f, fxy = get_spectra(time_series, csd_method)
+
+ lb_idx, ub_idx = utils.get_bounds(f, lb, ub)
+
+ if lb_idx == 0:
+ lb_idx = 1
+
+ p = np.zeros((time_series.shape[0], time_series.shape[0],
+ f[lb_idx:ub_idx].shape[-1]))
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ p[i][j] = _coherency_phase_delay(f[lb_idx:ub_idx],
+ fxy[i][j][lb_idx:ub_idx])
+ p[j][i] = _coherency_phase_delay(f[lb_idx:ub_idx],
+ fxy[i][j][lb_idx:ub_idx].conjugate())
+
+ return f[lb_idx:ub_idx], p
+
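+# Usage sketch (illustrative only; `ts` is an assumed (n, t) array sampled at
+# 1 Hz): pairwise delays (in seconds) within the 0.1-0.3 Hz band:
+#
+#   f, delay = coherency_phase_delay(ts, lb=0.1, ub=0.3,
+#                                    csd_method={'this_method': 'welch',
+#                                                'Fs': 1.0})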
+
+def _coherency_phase_delay(f, fxy):
+ r"""
+ Compute the phase delay between the spectra of two signals. The input to
+ this function is in the frequency domain.
+
+ Parameters
+ ----------
+
+ f: float array
+ The frequencies
+
+ fxy : float array
+ The cross-spectrum of the time series
+
+ Returns
+ -------
+
+ float array
+ the phase delay (in sec) for each frequency band.
+
+ """
+
+ return np.angle(fxy) / (2 * np.pi * f)
+
+
+def correlation_spectrum(x1, x2, Fs=2 * np.pi, norm=False):
+ """
+ Calculate the spectral decomposition of the correlation.
+
+ Parameters
+ ----------
+ x1,x2: ndarray
+ Two arrays to be correlated. Same dimensions
+
+ Fs: float, optional
+ Sampling rate in Hz. If provided, an array of
+ frequencies will be returned. Defaults to 2*pi
+
+ norm: bool, optional
+ When this is true, the spectrum is normalized to sum to 1
+
+ Returns
+ -------
+ f: ndarray
+ ndarray with the frequencies
+
+ ccn: ndarray
+ The spectral decomposition of the correlation
+
+ Notes
+ -----
+
+ This method is described in full in: D Cordes, V M Haughton, K Arfanakis, G
+ J Wendt, P A Turski, C H Moritz, M A Quigley, M E Meyerand (2000). Mapping
+ functionally related regions of brain with functional connectivity MR
+ imaging. AJNR American journal of neuroradiology 21:1636-44
+
+ """
+
+ x1 = x1 - np.mean(x1)
+ x2 = x2 - np.mean(x2)
+ x1_f = fftpack.fft(x1)
+ x2_f = fftpack.fft(x2)
+ D = np.sqrt(np.sum(x1 ** 2) * np.sum(x2 ** 2))
+ n = x1.shape[0]
+
+ ccn = ((np.real(x1_f) * np.real(x2_f) +
+ np.imag(x1_f) * np.imag(x2_f)) /
+ (D * n))
+
+ if norm:
+ ccn = ccn / np.sum(ccn) * 2 # Only half of the sum is sent back
+ # because of the freq domain symmetry.
+ # XXX Does normalization make this
+ # strictly positive?
+
+ f = utils.get_freqs(Fs, n)
+ return f, ccn[0:(n / 2 + 1)]
+
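+# Usage sketch (illustrative only; `x1` and `x2` are assumed 1-d arrays of
+# equal length, sampled at 1 Hz):
+#
+#   f, ccn = correlation_spectrum(x1, x2, Fs=1.0, norm=True)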
+
+#------------------------------------------------------------------------
+#Coherency calculated using cached spectra
+#------------------------------------------------------------------------
+"""The idea behind this set of functions is to keep a cache of the windowed fft
+calculations of each time-series in a massive collection of time-series, so
+that this calculation doesn't have to be repeated each time a cross-spectrum is
+calculated. The first function creates the cache and then, another function
+takes the cached spectra and calculates PSDs and CSDs, which are then passed to
+coherency_spec and organized in a data structure similar to the one
+created by coherence"""
+
+
+def cache_fft(time_series, ij, lb=0, ub=None,
+ method=None, prefer_speed_over_memory=False,
+ scale_by_freq=True):
+ """compute and cache the windowed FFTs of the time_series, in such a way
+ that computing the psd and csd of any combination of them can be done
+ quickly.
+
+ Parameters
+ ----------
+
+ time_series : float array
+ An ndarray with time-series, where time is the last dimension
+
+ ij: list of tuples
+ Each tuple in this variable should contain a pair of
+ indices of the form (i,j). The resulting cache will contain the fft of
+ time-series in the rows indexed by the unique elements of the union of i
+ and j
+
+ lb,ub: float
+ Define a frequency band of interest, for which the fft will be cached
+
+ method: dict, optional
+ See :func:`get_spectra` for details on how this is used. For this set
+ of functions, 'this_method' has to be 'welch'
+
+
+ Returns
+ -------
+ freqs, cache
+
+ where: cache =
+ {'FFT_slices':FFT_slices,'FFT_conj_slices':FFT_conj_slices,
+ 'norm_val':norm_val}
+
+ Notes
+ -----
+
+ - For these functions, only the Welch windowed periodogram ('welch') is
+ available.
+
+ - Detrending the input is not an option here, in order to save
+ time on an empty function call.
+
+ """
+ if method is None:
+ method = {'this_method': 'welch'} # The default
+
+ this_method = method.get('this_method', 'welch')
+
+ if this_method == 'welch':
+ NFFT = method.get('NFFT', 64)
+ Fs = method.get('Fs', 2 * np.pi)
+ window = method.get('window', mlab.window_hanning)
+ n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
+ else:
+ e_s = "For cache_fft, spectral estimation method must be welch"
+ raise ValueError(e_s)
+ time_series = utils.zero_pad(time_series, NFFT)
+
+ #The shape of the zero-padded version:
+ n_channels, n_time_points = time_series.shape
+
+ # get all the unique channels in time_series that we are interested in by
+ # checking the ij tuples
+ all_channels = set()
+ for i, j in ij:
+ all_channels.add(i)
+ all_channels.add(j)
+
+ # for real time_series, ignore the negative frequencies
+ if np.iscomplexobj(time_series):
+ n_freqs = NFFT
+ else:
+ n_freqs = NFFT // 2 + 1
+
+ #Which frequencies
+ freqs = utils.get_freqs(Fs, NFFT)
+
+ #If there are bounds, limit the calculation to within that band,
+ #potentially include the DC component:
+ lb_idx, ub_idx = utils.get_bounds(freqs, lb, ub)
+
+ n_freqs = ub_idx - lb_idx
+ #Make the window:
+ if mlab.cbook.iterable(window):
+ assert(len(window) == NFFT)
+ window_vals = window
+ else:
+ window_vals = window(np.ones(NFFT, time_series.dtype))
+
+ #Each fft needs to be normalized by the square of the norm of the window
+ #and, for consistency with newer versions of mlab.csd (which, in turn, are
+ #consistent with Matlab), normalize also by the sampling rate:
+
+ if scale_by_freq:
+ #This is the normalization factor for one-sided estimation, taking into
+ #account the sampling rate. This makes the PSD a density function, with
+ #units of dB/Hz, so that integrating over frequencies gives you the RMS
+ #(XXX this should be in the tests!).
+ norm_val = (np.abs(window_vals) ** 2).sum() * (Fs / 2)
+
+ else:
+ norm_val = (np.abs(window_vals) ** 2).sum() / 2
+
+ # cache the FFT of every windowed NFFT-length segment (no detrending)
+ # of every channel. If prefer_speed_over_memory, cache the conjugate
+ # as well
+
+ i_times = list(range(0, n_time_points - NFFT + 1, NFFT - n_overlap))
+ n_slices = len(i_times)
+ FFT_slices = {}
+ FFT_conj_slices = {}
+
+ for i_channel in all_channels:
+ #dbg:
+ #print i_channel
+ Slices = np.zeros((n_slices, n_freqs), dtype=np.complex)
+ for iSlice in range(n_slices):
+ thisSlice = time_series[i_channel,
+ i_times[iSlice]:i_times[iSlice] + NFFT]
+
+ #Windowing:
+ thisSlice = window_vals * thisSlice # No detrending
+ #Derive the fft for that slice:
+ Slices[iSlice, :] = (fftpack.fft(thisSlice)[lb_idx:ub_idx])
+
+ FFT_slices[i_channel] = Slices
+
+ if prefer_speed_over_memory:
+ FFT_conj_slices[i_channel] = np.conjugate(Slices)
+
+ cache = {'FFT_slices': FFT_slices, 'FFT_conj_slices': FFT_conj_slices,
+ 'norm_val': norm_val, 'Fs': Fs, 'scale_by_freq': scale_by_freq}
+
+ return freqs, cache
+
+
+def cache_to_psd(cache, ij):
+ """
+ From a set of cached windowed fft, calculate the psd
+
+ Parameters
+ ----------
+ cache : dict
+ Return value from :func:`cache_fft`
+
+ ij : list
+ A list of tuples of the form (i,j).
+
+ Returns
+ -------
+ Pxx : dict
+ The power spectral densities of the requested time-series. The keys
+ are the unique i and j values appearing in the parameter ij
+
+ """
+ # This is the way it is saved by cache_spectra:
+ FFT_slices = cache['FFT_slices']
+ FFT_conj_slices = cache['FFT_conj_slices']
+ norm_val = cache['norm_val']
+ # Fs = cache['Fs']
+
+ # This is where the output goes to:
+ Pxx = {}
+ all_channels = set()
+ for i, j in ij:
+ all_channels.add(i)
+ all_channels.add(j)
+
+ for i in all_channels:
+ #dbg:
+ #print i
+ #If we made the conjugate slices:
+ if FFT_conj_slices:
+ Pxx[i] = FFT_slices[i] * FFT_conj_slices[i]
+ else:
+ Pxx[i] = FFT_slices[i] * np.conjugate(FFT_slices[i])
+
+ #If there is more than one window
+ if FFT_slices[i].shape[0] > 1:
+ Pxx[i] = np.mean(Pxx[i], 0)
+
+ Pxx[i] /= norm_val
+ # Correct for the NFFT/2 and DC components:
+ Pxx[i][[0, -1]] /= 2
+
+ return Pxx
+
+
+def cache_to_phase(cache, ij):
+ """ From a set of cached set of windowed fft's, calculate the
+ frequency-band dependent phase for each of the channels in ij.
+ Note that this returns the absolute phases of the time-series, not the
+ relative phases between them. In order to get relative phases, use
+ cache_to_relative_phase
+
+ Parameters
+ ----------
+ cache : dict
+ The return value of :func:`cache_fft`
+
+ ij: list
+ A list of tuples of the form (i,j) for all the indices for which to
+ calculate the phases
+
+ Returns
+ -------
+
+ Phase : dict
+ The individual phases, keys are all the i and j in ij, such that
+ Phase[i] gives you the phase for the time-series i in the input to
+ :func:`cache_fft`
+
+ """
+ FFT_slices = cache['FFT_slices']
+
+ Phase = {}
+
+ all_channels = set()
+ for i, j in ij:
+ all_channels.add(i)
+ all_channels.add(j)
+
+ for i in all_channels:
+ Phase[i] = np.angle(FFT_slices[i])
+ #If there is more than one window, average over all the windows:
+ if FFT_slices[i].shape[0] > 1:
+ Phase[i] = np.mean(Phase[i], 0)
+
+ return Phase
+
+
+def cache_to_relative_phase(cache, ij):
+ """ From a set of cached set of windowed fft's, calculate the
+ frequency-band dependent relative phase for the combinations ij.
+
+ Parameters
+ ----------
+ cache: dict
+ The return value from :func:`cache_fft`
+
+ ij: list
+ A list of tuples of the form (i,j), all the pairs of indices for which
+ to calculate the relative phases
+
+ Returns
+ -------
+
+ Phi_xy : ndarray
+ The relative phases between the time-series i and j, such that
+ Phi_xy[i,j] is the phase from time_series[i] to time_series[j].
+
+ Note
+ ----
+
+ This function will give you a different result than using
+ :func:`coherency_phase_spectrum`. This is because
+ :func:`coherency_phase_spectrum` calculates the angle based on the average
+ psd, whereas this function calculates the average of the angles calculated
+ on individual windows.
+
+ """
+ # This is the way it is saved by cache_spectra:
+ FFT_slices = cache['FFT_slices']
+ FFT_conj_slices = cache['FFT_conj_slices']
+ # norm_val = cache['norm_val']
+
+ freqs = cache['FFT_slices'][ij[0][0]].shape[-1]
+
+ ij_array = np.array(ij)
+
+ channels_i = max(1, max(ij_array[:, 0]) + 1)
+ channels_j = max(1, max(ij_array[:, 1]) + 1)
+ #Pre-allocate for speed:
+ Phi_xy = np.zeros((channels_i, channels_j, freqs), dtype=np.complex)
+
+ #These checks take time, so do them up front, not in every iteration:
+ if list(FFT_slices.items())[0][1].shape[0] > 1:
+ if FFT_conj_slices:
+ for i, j in ij:
+ phi = np.angle(FFT_slices[i] * FFT_conj_slices[j])
+ Phi_xy[i, j] = np.mean(phi, 0)
+
+ else:
+ for i, j in ij:
+ phi = np.angle(FFT_slices[i] * np.conjugate(FFT_slices[j]))
+ Phi_xy[i, j] = np.mean(phi, 0)
+
+ else:
+ if FFT_conj_slices:
+ for i, j in ij:
+ Phi_xy[i, j] = np.angle(FFT_slices[i] * FFT_conj_slices[j])
+
+ else:
+ for i, j in ij:
+ Phi_xy[i, j] = np.angle(FFT_slices[i] *
+ np.conjugate(FFT_slices[j]))
+
+ return Phi_xy
+
+
+def cache_to_coherency(cache, ij):
+ """From a set of cached spectra, calculate the coherency
+ relationships
+
+ Parameters
+ ----------
+ cache: dict
+ the return value from :func:`cache_fft`
+
+ ij: list
+ a list of (i,j) tuples, the pairs of indices for which the
+ cross-coherency is to be calculated
+
+ Returns
+ -------
+ Cxy: ndarray
+ The coherency values between the time-series in ij. Indexing into this
+ array takes the form Cxy[i,j] in order to extract the coherency between
+ time-series i and time-series j in the original input to
+ :func:`cache_fft`
+ """
+
+ #This is the way it is saved by cache_spectra:
+ FFT_slices = cache['FFT_slices']
+ FFT_conj_slices = cache['FFT_conj_slices']
+ norm_val = cache['norm_val']
+
+ freqs = cache['FFT_slices'][ij[0][0]].shape[-1]
+
+ ij_array = np.array(ij)
+
+ channels_i = max(1, max(ij_array[:, 0]) + 1)
+ channels_j = max(1, max(ij_array[:, 1]) + 1)
+ Cxy = np.zeros((channels_i, channels_j, freqs), dtype=np.complex)
+
+ #These checks take time, so do them up front, not in every iteration:
+ if list(FFT_slices.items())[0][1].shape[0] > 1:
+ if FFT_conj_slices:
+ for i, j in ij:
+ #dbg:
+ #print i,j
+ Pxy = FFT_slices[i] * FFT_conj_slices[j]
+ Pxx = FFT_slices[i] * FFT_conj_slices[i]
+ Pyy = FFT_slices[j] * FFT_conj_slices[j]
+ Pxx = np.mean(Pxx, 0)
+ Pyy = np.mean(Pyy, 0)
+ Pxy = np.mean(Pxy, 0)
+ Pxy /= norm_val
+ Pxx /= norm_val
+ Pyy /= norm_val
+ Cxy[i, j] = Pxy / np.sqrt(Pxx * Pyy)
+
+ else:
+ for i, j in ij:
+ Pxy = FFT_slices[i] * np.conjugate(FFT_slices[j])
+ Pxx = FFT_slices[i] * np.conjugate(FFT_slices[i])
+ Pyy = FFT_slices[j] * np.conjugate(FFT_slices[j])
+ Pxx = np.mean(Pxx, 0)
+ Pyy = np.mean(Pyy, 0)
+ Pxy = np.mean(Pxy, 0)
+ Pxy /= norm_val
+ Pxx /= norm_val
+ Pyy /= norm_val
+ Cxy[i, j] = Pxy / np.sqrt(Pxx * Pyy)
+ else:
+ if FFT_conj_slices:
+ for i, j in ij:
+ Pxy = FFT_slices[i] * FFT_conj_slices[j]
+ Pxx = FFT_slices[i] * FFT_conj_slices[i]
+ Pyy = FFT_slices[j] * FFT_conj_slices[j]
+ Pxy /= norm_val
+ Pxx /= norm_val
+ Pyy /= norm_val
+ Cxy[i, j] = Pxy / np.sqrt(Pxx * Pyy)
+
+ else:
+ for i, j in ij:
+ Pxy = FFT_slices[i] * np.conjugate(FFT_slices[j])
+ Pxx = FFT_slices[i] * np.conjugate(FFT_slices[i])
+ Pyy = FFT_slices[j] * np.conjugate(FFT_slices[j])
+ Pxy /= norm_val
+ Pxx /= norm_val
+ Pyy /= norm_val
+ Cxy[i, j] = Pxy / np.sqrt(Pxx * Pyy)
+
+ return Cxy
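+
+# Usage sketch (illustrative only; `ts` is an assumed (n, t) array): the
+# cached workflow computes the windowed FFTs once and reuses them for the
+# requested (i, j) pairs:
+#
+#   ij = [(0, 1), (0, 2)]
+#   freqs, cache = cache_fft(ts, ij,
+#                            method={'this_method': 'welch', 'NFFT': 64})
+#   Pxx = cache_to_psd(cache, ij)
+#   Cxy = cache_to_coherency(cache, ij)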
diff --git a/nitime/algorithms/correlation.py b/nitime/algorithms/correlation.py
new file mode 100644
index 0000000..488a721
--- /dev/null
+++ b/nitime/algorithms/correlation.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+__all__ = ["seed_corrcoef"]
+
+
+def seed_corrcoef(seed, target):
+ """Compute seed-based correlation coefficient"""
+
+ x = target - np.mean(target, -1)[..., np.newaxis]
+ y = seed - np.mean(seed)
+ xx = np.sum(x ** 2, -1)
+ yy = np.sum(y ** 2, -1)
+ xy = np.dot(x, y)
+ r = xy / np.sqrt(xx * yy)
+
+ return r
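+
+# Usage sketch (illustrative only): correlate a single seed series of length t
+# with every row of an assumed (n, t) `target` array:
+#
+#   r = seed_corrcoef(seed, target)   # r has shape (n,)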
diff --git a/nitime/algorithms/event_related.py b/nitime/algorithms/event_related.py
new file mode 100644
index 0000000..be6f86e
--- /dev/null
+++ b/nitime/algorithms/event_related.py
@@ -0,0 +1,150 @@
+"""
+
+Event-related analysis
+
+"""
+
+import numpy as np
+from nitime.lazy import scipy_linalg as linalg
+from nitime.lazy import scipy_fftpack as fftpack
+
+
+def fir(timeseries, design):
+ """
+ Calculate the FIR (finite impulse response) HRF, according to [Burock2000]_
+
+ Parameters
+ ----------
+
+ timeseries : float array
+ timeseries data
+
+ design : int array
+ This is a design matrix. It has to have shape = (number
+ of TRS, number of conditions * length of HRF)
+
+ The form of the matrix is:
+
+ A B C ...
+
+ where A is a (number of TRs) x (length of HRF) matrix with an identity
+ matrix placed with its top-left corner at each TR in which an
+ event of type A occurred in the design. B is the equivalent for
+ events of type B, etc.
+
+ Returns
+ -------
+
+ HRF: float array
+ HRF is a numpy array of 1X(length of HRF * number of conditions)
+ with the HRFs for the different conditions concatenated. This is an
+ estimate of the linear filters between the time-series and the events
+ described in design.
+
+ Notes
+ -----
+
+ Implements equation 4 in Burock(2000):
+
+ .. math::
+
+ \hat{h} = (X^T X)^{-1} X^T y
+
+ M.A. Burock and A.M.Dale (2000). Estimation and Detection of Event-Related
+ fMRI Signals with Temporally Correlated Noise: A Statistically Efficient
+ and Unbiased Approach. Human Brain Mapping, 11:249-260
+
+ """
+ X = np.matrix(design)
+ y = np.matrix(timeseries)
+ h = np.array(linalg.pinv(X.T * X) * X.T * y.T)
+ return h
+
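+# Design-matrix sketch (illustrative only; `n_trs`, `len_hrf` and `event_trs`
+# are assumed variables for a single condition whose events start at the TR
+# indices listed in event_trs):
+#
+#   design = np.zeros((n_trs, len_hrf))
+#   for e in event_trs:
+#       design[e:e + len_hrf] += np.eye(len_hrf)[:n_trs - e]
+#   h = fir(timeseries, design)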
+
+def freq_domain_xcorr(tseries, events, t_before, t_after, Fs=1):
+ """
+ Calculates the event related timeseries, using a cross-correlation in the
+ frequency domain.
+
+ Parameters
+ ----------
+ tseries: float array
+ Time series data with time as the last dimension
+
+ events: float array
+ An array with time-resolved events, at the same sampling rate as tseries
+
+ t_before: float
+ Time before the event to include
+
+ t_after: float
+ Time after the event to include
+
+ Fs: float
+ Sampling rate of the time-series (in Hz)
+
+ Returns
+ -------
+ xcorr: float array
+ The correlation function between the tseries and the events. Can be
+ interpreted as a linear filter from events to responses (the
+ time-series) of an LTI system.
+
+ """
+ fft = fftpack.fft
+ ifft = fftpack.ifft
+ fftshift = fftpack.fftshift
+
+ xcorr = np.real(fftshift(ifft(fft(tseries) *
+ fft(np.fliplr([events])))))
+
+ return xcorr[0][np.ceil(len(xcorr[0]) / 2) - t_before * Fs:
+ np.ceil(len(xcorr[0]) / 2) + t_after / 2 * Fs] / np.sum(events)
+
+
+def freq_domain_xcorr_zscored(tseries, events, t_before, t_after, Fs=1):
+ """
+ Calculates the z-scored event related timeseries, using a cross-correlation
+ in the frequency domain.
+
+ Parameters
+ ----------
+ tseries: float array
+ Time series data with time as the last dimension
+
+ events: float array
+ An array with time-resolved events, at the same sampling rate as tseries
+
+ t_before: float
+ Time before the event to include
+
+ t_after: float
+ Time after the event to include
+
+ Fs: float
+ Sampling rate of the time-series (in Hz)
+
+ Returns
+ -------
+ xcorr: float array
+ The correlation function between the tseries and the events. Can be
+ interpreted as a linear filter from events to responses (the
+ time-series) of an LTI system. Because it is normalized to its own mean
+ and variance, it can be interpreted as measuring statistical significance
+ relative to all time-shifted versions of the events.
+
+ """
+
+ fft = fftpack.fft
+ ifft = fftpack.ifft
+ fftshift = fftpack.fftshift
+
+ xcorr = np.real(fftshift(ifft(fft(tseries) * fft(np.fliplr([events])))))
+
+ meanSurr = np.mean(xcorr)
+ stdSurr = np.std(xcorr)
+
+ return (((xcorr[0][np.ceil(len(xcorr[0]) / 2) - t_before * Fs:
+ np.ceil(len(xcorr[0]) / 2) + t_after * Fs])
+ - meanSurr)
+ / stdSurr)
diff --git a/nitime/algorithms/filter.py b/nitime/algorithms/filter.py
new file mode 100644
index 0000000..829600a
--- /dev/null
+++ b/nitime/algorithms/filter.py
@@ -0,0 +1,94 @@
+import numpy as np
+
+
+def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):
+ """
+ Filters data into a frequency range.
+
+ For each of the two bounds, a low-passed version of the signal is created
+ by convolving with a box-car. The signal is first low-passed at the upper
+ bound; the low-passed version computed with the lower bound is then
+ subtracted from it, resulting in a band-passed version
+
+ Parameters
+ ----------
+
+ time_series: float array
+ the signal
+ ub : float, optional
+ The cut-off frequency for the low-pass filtering as a proportion of the
+ sampling rate. Default to 0.5 (Nyquist)
+ lb : float, optional
+ The cut-off frequency for the high-pass filtering as a proportion of the
+ sampling rate. Default to 0
+ n_iterations: int, optional
+ how many rounds of smoothing to do. Default to 2.
+
+ Returns
+ -------
+ float array:
+ The signal, filtered
+ """
+
+ n = time_series.shape[-1]
+
+ len_boxcar_ub = np.ceil(1 / (2.0 * ub))
+ boxcar_ub = np.empty(len_boxcar_ub)
+ boxcar_ub.fill(1.0 / len_boxcar_ub)
+ boxcar_ones_ub = np.ones_like(boxcar_ub)
+
+ if lb == 0:
+ lb = None
+ else:
+ len_boxcar_lb = np.ceil(1 / (2.0 * lb))
+ boxcar_lb = np.empty(len_boxcar_lb)
+ boxcar_lb.fill(1.0 / len_boxcar_lb)
+ boxcar_ones_lb = np.ones_like(boxcar_lb)
+
+ #If the time_series is a 1-d, we add a dimension, so that we can iterate
+ #over 2-d inputs:
+ if len(time_series.shape) == 1:
+ time_series = np.array([time_series])
+ for i in range(time_series.shape[0]):
+ if ub:
+ #Start by applying a low-pass to the signal. Pad the signal on
+ #each side with the initial and terminal signal value:
+ pad_s = np.hstack((boxcar_ones_ub *
+ time_series[i, 0], time_series[i]))
+ pad_s = np.hstack((pad_s, boxcar_ones_ub * time_series[i, -1]))
+
+ #Filter operation is a convolution with the box-car(iterate,
+ #n_iterations times over this operation):
+ for iteration in range(n_iterations):
+ conv_s = np.convolve(pad_s, boxcar_ub)
+
+ #Extract the low pass signal by excising the central
+ #len(time_series) points:
+ time_series[i] = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):
+ conv_s.shape[-1] / 2 + np.ceil(n / 2.)]
+
+ #Now, if there is a high-pass, do the same, but in the end subtract out
+ #the low-passed signal:
+ if lb:
+ pad_s = np.hstack((boxcar_ones_lb *
+ time_series[i, 0], time_series[i]))
+ pad_s = np.hstack((pad_s, boxcar_ones_lb * time_series[i, -1]))
+
+ #Filter operation is a convolution with the box-car(iterate,
+ #n_iterations times over this operation):
+ for iteration in range(n_iterations):
+ conv_s = np.convolve(pad_s, boxcar_lb)
+
+ #Extract the low pass signal by excising the central
+ #len(time_series) points:
+ s_lp = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):
+ conv_s.shape[-1] / 2 + np.ceil(n / 2.)]
+
+ #Extract the high pass signal simply by subtracting the high pass
+ #signal from the original signal:
+ time_series[i] = time_series[i] - s_lp + np.mean(s_lp) # add mean
+ #to make sure that there are no negative values. This also seems to
+ #make sure that the mean of the signal (in % signal change) is
+ #close to 0
+
+ return time_series.squeeze()
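+
+# Usage sketch (illustrative only; `ts` is an assumed signal array): band-pass
+# to 0.01-0.2 of the sampling rate. Note that a 2-d input is written to in
+# place, hence the copy:
+#
+#   filtered = boxcar_filter(ts.copy(), lb=0.01, ub=0.2)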
diff --git a/nitime/algorithms/spectral.py b/nitime/algorithms/spectral.py
new file mode 100644
index 0000000..e240f5a
--- /dev/null
+++ b/nitime/algorithms/spectral.py
@@ -0,0 +1,959 @@
+"""
+
+Spectral transforms are used in order to estimate the frequency-domain
+representation of time-series. Several methods can be used and this module
+contains implementations of several algorithms for the calculation of spectral
+transforms.
+
+"""
+
+import numpy as np
+from nitime.lazy import matplotlib_mlab as mlab
+from nitime.lazy import scipy_linalg as linalg
+from nitime.lazy import scipy_signal as sig
+from nitime.lazy import scipy_interpolate as interpolate
+from nitime.lazy import scipy_fftpack as fftpack
+
+import nitime.utils as utils
+
+# To support older versions of numpy that don't have tril_indices:
+from nitime.index_utils import tril_indices, triu_indices
+
+
+# Set global variables for the default NFFT to be used in spectral analysis and
+# the overlap:
+default_nfft = 64
+default_n_overlap = int(np.ceil(default_nfft / 2.0))
+
+def get_spectra(time_series, method=None):
+ r"""
+ Compute the spectra of an n-tuple of time series and all of
+ the pairwise cross-spectra.
+
+ Parameters
+ ----------
+ time_series : float array
+ The time-series, where time is the last dimension
+
+ method : dict, optional
+
+ contains: this_method:'welch'
+ indicates that :func:`mlab.csd` will be used in
+ order to calculate the psd/csd, in which case, additional optional
+ inputs (and default values) are:
+
+ NFFT=64
+
+ Fs=2pi
+
+ detrend=mlab.detrend_none
+
+ window=mlab.window_hanning
+
+ n_overlap=0
+
+ this_method:'periodogram_csd'
+ indicates that :func:`periodogram` will
+ be used in order to calculate the psd/csd, in which case, additional
+ optional inputs (and default values) are:
+
+ Skx=None
+
+ Sky=None
+
+ N=None
+
+ sides='onesided'
+
+ normalize=True
+
+ Fs=2pi
+
+ this_method:'multi_taper_csd'
+ indicates that :func:`multi_taper_psd` will be used in order to calculate
+ psd/csd, in which case additional optional inputs (and default
+ values) are:
+
+ BW=0.01
+
+ Fs=2pi
+
+ sides = 'onesided'
+
+ Returns
+ -------
+
+ f : float array
+ The central frequencies for the frequency bands for which the spectra
+ are estimated
+
+ fxy : float array
+ A semi-filled matrix with the cross-spectra of the signals. For i <= j,
+ the csd of signal i and signal j is in fxy[i][j], while fxy[j][i] is
+ left filled with zeros. For i=j, fxy[i][j] is the psd of signal i.
+
+ """
+ if method is None:
+ method = {'this_method': 'welch'} # The default
+ # If no choice of method was explicitly set, but other parameters were
+ # passed, assume that the method is mlab:
+ this_method = method.get('this_method', 'welch')
+
+ if this_method == 'welch':
+ NFFT = method.get('NFFT', default_nfft)
+ Fs = method.get('Fs', 2 * np.pi)
+ detrend = method.get('detrend', mlab.detrend_none)
+ window = method.get('window', mlab.window_hanning)
+ n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
+
+ # The length of the spectrum depends on how many sides are taken, which
+ # depends on whether or not this is a complex object:
+ if np.iscomplexobj(time_series):
+ fxy_len = NFFT
+ else:
+ fxy_len = NFFT / 2.0 + 1
+
+ # If there is only 1 channel in the time-series:
+ if len(time_series.shape) == 1 or time_series.shape[0] == 1:
+ temp, f = mlab.csd(time_series, time_series,
+ NFFT, Fs, detrend, window, n_overlap,
+ scale_by_freq=True)
+
+ fxy = temp.squeeze() # the output of mlab.csd has a weird
+ # shape
+ else:
+ fxy = np.zeros((time_series.shape[0],
+ time_series.shape[0],
+ fxy_len), dtype=complex) # Make sure it's complex
+
+ for i in range(time_series.shape[0]):
+ for j in range(i, time_series.shape[0]):
+ #Notice funny indexing, in order to conform to the
+ #conventions of the other methods:
+ temp, f = mlab.csd(time_series[j], time_series[i],
+ NFFT, Fs, detrend, window, n_overlap,
+ scale_by_freq=True)
+
+ fxy[i][j] = temp.squeeze() # the output of mlab.csd has a
+ # weird shape
+ elif this_method in ('multi_taper_csd', 'periodogram_csd'):
+ # these methods should work with similar signatures
+ mdict = method.copy()
+ func = eval(mdict.pop('this_method'))
+ freqs, fxy = func(time_series, **mdict)
+ f = utils.circle_to_hz(freqs, mdict.get('Fs', 2 * np.pi))
+
+ else:
+ raise ValueError("Unknown method provided")
+
+ return f, fxy.squeeze()
+
+
+def get_spectra_bi(x, y, method=None):
+ r"""
+ Computes the spectra of two timeseries and the cross-spectrum between them
+
+ Parameters
+ ----------
+
+ x,y : float arrays
+ Time-series data
+
+ method : dict, optional
+ See :func:`get_spectra` documentation for details
+
+ Returns
+ -------
+ f : float array
+ The central frequencies for the frequency
+ bands for which the spectra are estimated
+ fxx : float array
+ The psd of the first signal
+ fyy : float array
+ The psd of the second signal
+ fxy : float array
+ The cross-spectral density of the two signals
+
+ """
+ f, fij = get_spectra(np.vstack((x, y)), method=method)
+ fxx = fij[0, 0].real
+ fyy = fij[1, 1].real
+ fxy = fij[0, 1]
+ return f, fxx, fyy, fxy
+
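+# Usage sketch (illustrative only; `x` and `y` are assumed 1-d series of equal
+# length):
+#
+#   f, fxx, fyy, fxy = get_spectra_bi(x, y,
+#                                     method={'this_method': 'welch',
+#                                             'NFFT': 128})
+#   coh = np.abs(fxy) ** 2 / (fxx * fyy)   # coherence, as in cohere.py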
+
+# The following spectrum estimates are normalized to the convention
+# adopted by MATLAB (or at least spectrum.psd)
+# By definition, Sxx(f) = DTFT{Rxx(n)}, where Rxx(n) is the autocovariance
+# function of x(n). Therefore the integral from
+# [-Fs/2, Fs/2] of Sxx(f)*df is Rxx(0).
+# And from the definition of Rxx(n),
+# Rxx(0) = Expected-Value{x(n)x*(n)} = Expected-Value{ |x|^2 },
+# which is estimated as (x*x.conj()).mean()
+# In other words, sum(Sxx) * Fs / NFFT ~ var(x)
+
+def periodogram(s, Fs=2 * np.pi, Sk=None, N=None,
+ sides='default', normalize=True):
+ """Takes an N-point periodogram estimate of the PSD function. The
+ number of points N, or a precomputed FFT Sk may be provided. By default,
+ the PSD function returned is normalized so that the integral of the PSD
+ is equal to the mean squared amplitude (mean energy) of s (see Notes).
+
+ Parameters
+ ----------
+ s : ndarray
+ Signal(s) for which to estimate the PSD, time dimension in the last
+ axis
+
+ Fs : float (optional)
+ The sampling rate. Defaults to 2*pi
+
+ Sk : ndarray (optional)
+ Precomputed FFT of s
+
+ N : int (optional)
+ Indicates an N-point FFT where N != s.shape[-1]
+
+ sides : str (optional) [ 'default' | 'onesided' | 'twosided' ]
+ This determines which sides of the spectrum to return.
+ For complex-valued inputs, the default is two-sided; for real-valued
+ inputs, the default is one-sided.
+
+ normalize : boolean (optional, default=True)
+ Normalizes the PSD
+
+ Returns
+ -------
+ (f, psd) : tuple
+ f: The central frequencies for the frequency bands
+ psd: The PSD estimate for each row of s
+
+ """
+ if Sk is not None:
+ N = Sk.shape[-1]
+ else:
+ N = s.shape[-1] if not N else N
+ Sk = fftpack.fft(s, n=N)
+ pshape = list(Sk.shape)
+
+ # if the time series is a complex vector, a one sided PSD is invalid:
+ if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
+ sides = 'twosided'
+ elif sides in ('default', 'onesided'):
+ sides = 'onesided'
+
+ if sides == 'onesided':
+ # putative Nyquist freq
+ Fn = N / 2 + 1
+ # last duplicate freq
+ Fl = (N + 1) / 2
+ pshape[-1] = Fn
+ P = np.zeros(pshape, 'd')
+ freqs = np.linspace(0, Fs / 2, Fn)
+ P[..., 0] = (Sk[..., 0] * Sk[..., 0].conj()).real
+ P[..., 1:Fl] = 2 * (Sk[..., 1:Fl] * Sk[..., 1:Fl].conj()).real
+ if Fn > Fl:
+ P[..., Fn - 1] = (Sk[..., Fn - 1] * Sk[..., Fn - 1].conj()).real
+ else:
+ P = (Sk * Sk.conj()).real
+ freqs = np.linspace(0, Fs, N, endpoint=False)
+ if normalize:
+ P /= (Fs * s.shape[-1])
+ return freqs, P
+
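+# Usage sketch (illustrative only; `s` is an assumed zero-mean 1-d signal):
+#
+#   freqs, psd = periodogram(s, Fs=2 * np.pi)
+#   # per the normalization note above, np.sum(psd) * Fs / len(s) should
+#   # approximate np.var(s)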
+
+def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None, sides='default',
+ normalize=True):
+ """Takes an N-point periodogram estimate of all the cross spectral
+ density functions between rows of s.
+
+ The number of points N, or a precomputed FFT Sk may be provided. By
+ default, the CSD function returned is normalized so that the integral of
+ the PSD is equal to the mean squared amplitude (mean energy) of s (see
+ Notes).
+
+ Parameters
+ ---------
+
+ s : ndarray
+ Signals for which to estimate the CSD, time dimension in the last axis
+
+ Fs : float (optional)
+ The sampling rate. Defaults to 2*pi
+
+ Sk : ndarray (optional)
+ Precomputed FFT of rows of s
+
+ NFFT : int (optional)
+ Indicates an N-point FFT where N != s.shape[-1]
+
+ sides : str (optional) [ 'default' | 'onesided' | 'twosided' ]
+ This determines which sides of the spectrum to return.
+ For complex-valued inputs, the default is two-sided; for real-valued
+ inputs, the default is one-sided.
+
+ normalize : boolean (optional)
+ Normalizes the PSD
+
+ Returns
+ -------
+
+ freqs, csd_est : ndarrays
+ The estimated CSD and the frequency points vector.
+ The CSD{i,j}(f) are returned in a square "matrix" of vectors
+ holding Sij(f). For an input array that is reshaped to (M,N),
+ the output is (M,M,N)
+
+ """
+ s_shape = s.shape
+ s.shape = (np.prod(s_shape[:-1]), s_shape[-1])
+ # defining an Sk_loc is a little opaque, but it avoids having to
+ # reset the shape of any user-given Sk later on
+ if Sk is not None:
+ Sk_shape = Sk.shape
+ N = Sk.shape[-1]
+ Sk_loc = Sk.reshape(np.prod(Sk_shape[:-1]), N)
+ else:
+ if NFFT is not None:
+ N = NFFT
+ else:
+ N = s.shape[-1]
+ Sk_loc = fftpack.fft(s, n=N)
+ # reset s.shape
+ s.shape = s_shape
+
+ M = Sk_loc.shape[0]
+
+ # if the time series is a complex vector, a one sided PSD is invalid:
+ if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
+ sides = 'twosided'
+ elif sides in ('default', 'onesided'):
+ sides = 'onesided'
+
+ if sides == 'onesided':
+ # putative Nyquist freq
+ Fn = N / 2 + 1
+ # last duplicate freq
+ Fl = (N + 1) / 2
+ csd_pairs = np.zeros((M, M, Fn), 'D')
+ freqs = np.linspace(0, Fs / 2, Fn)
+ for i in range(M):
+ for j in range(i + 1):
+ csd_pairs[i, j, 0] = Sk_loc[i, 0] * Sk_loc[j, 0].conj()
+ csd_pairs[i, j, 1:Fl] = 2 * (Sk_loc[i, 1:Fl] *
+ Sk_loc[j, 1:Fl].conj())
+ if Fn > Fl:
+ csd_pairs[i, j, Fn - 1] = (Sk_loc[i, Fn - 1] *
+ Sk_loc[j, Fn - 1].conj())
+
+ else:
+ csd_pairs = np.zeros((M, M, N), 'D')
+ freqs = np.linspace(0, Fs, N, endpoint=False)
+ for i in range(M):
+ for j in range(i + 1):
+ csd_pairs[i, j] = Sk_loc[i] * Sk_loc[j].conj()
+ if normalize:
+ csd_pairs /= (Fs*N)
+
+ csd_mat = csd_pairs.transpose(1,0,2).conj()
+ csd_mat += csd_pairs
+ diag_idc = (np.arange(M), np.arange(M))
+ csd_mat[diag_idc] /= 2
+
+ return freqs, csd_mat
+
+
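+# Illustrative sketch (the _example_* helper is hypothetical, not part of the
+# nitime API): periodogram_csd returns an (M, M, n_freqs) array whose diagonal
+# agrees with the real-valued PSDs and whose off-diagonal entries are
+# Hermitian-symmetric cross-spectra.
+def _example_periodogram_csd():
+    import numpy as np
+    s = np.random.randn(2, 512)
+    freqs, csd = periodogram_csd(s, Fs=2 * np.pi)
+    # Hermitian symmetry: S_{01}(f) == conj(S_{10}(f))
+    hermitian_ok = np.allclose(csd[0, 1], csd[1, 0].conj())
+    return freqs, csd, hermitian_ok
+
+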
+def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind='linear'):
+ """
+ Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
+ for a given frequency-spacing multiple NW and sequence length N.
+
+ Parameters
+ ----------
+ N : int
+ sequence length
+ NW : float, unitless
+ standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
+ but with dt taken as 1
+ Kmax : int
+ number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
+ interp_from : int (optional)
+ The dpss can be calculated using interpolation from a set of dpss
+ with the same NW and Kmax, but shorter N. This is the length of this
+ shorter set of dpss windows.
+    interp_kind : str (optional)
+        This input variable is passed to scipy.interpolate.interp1d and
+        specifies the kind of interpolation as a string ('linear', 'nearest',
+        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying
+        the order of the spline interpolator to use.
+
+
+ Returns
+ -------
+ v, e : tuple,
+ v is an array of DPSS windows shaped (Kmax, N)
+ e are the eigenvalues
+
+ Notes
+ -----
+ Tridiagonal form of DPSS calculation from:
+
+    Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
+    uncertainty V: The discrete case. Bell System Technical Journal,
+    Volume 57 (1978), 1371-1430
+ """
+ Kmax = int(Kmax)
+ W = float(NW) / N
+ nidx = np.arange(N, dtype='d')
+
+ # In this case, we create the dpss windows of the smaller size
+ # (interp_from) and then interpolate to the larger size (N)
+ if interp_from is not None:
+ if interp_from > N:
+ e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
+ e_s += 'and N is: %s. ' % N
+ e_s += 'Please enter interp_from smaller than N.'
+ raise ValueError(e_s)
+ dpss = []
+ d, e = dpss_windows(interp_from, NW, Kmax)
+ for this_d in d:
+ x = np.arange(this_d.shape[-1])
+ I = interpolate.interp1d(x, this_d, kind=interp_kind)
+ d_temp = I(np.arange(0, this_d.shape[-1] - 1,
+ float(this_d.shape[-1] - 1) / N))
+
+ # Rescale:
+ d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
+
+ dpss.append(d_temp)
+
+ dpss = np.array(dpss)
+
+ else:
+ # here we want to set up an optimization problem to find a sequence
+ # whose energy is maximally concentrated within band [-W,W].
+ # Thus, the measure lambda(T,W) is the ratio between the energy within
+ # that band, and the total energy. This leads to the eigen-system
+ # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
+ # eigenvalue is the sequence with maximally concentrated energy. The
+ # collection of eigenvectors of this system are called Slepian
+ # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
+ # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
+ # concentration
+ # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
+
+ # Here I set up an alternative symmetric tri-diagonal eigenvalue
+ # problem such that
+ # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
+ # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
+ # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
+ # [see Percival and Walden, 1993]
+ diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
+ off_diag = np.zeros_like(nidx)
+ off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
+ # put the diagonals in LAPACK "packed" storage
+ ab = np.zeros((2, N), 'd')
+ ab[1] = diagonal
+ ab[0, 1:] = off_diag[:-1]
+ # only calculate the highest Kmax eigenvalues
+ w = linalg.eigvals_banded(ab, select='i',
+ select_range=(N - Kmax, N - 1))
+ w = w[::-1]
+
+ # find the corresponding eigenvectors via inverse iteration
+ t = np.linspace(0, np.pi, N)
+ dpss = np.zeros((Kmax, N), 'd')
+ for k in range(Kmax):
+ dpss[k] = utils.tridi_inverse_iteration(
+ diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)
+ )
+
+ # By convention (Percival and Walden, 1993 pg 379)
+ # * symmetric tapers (k=0,2,4,...) should have a positive average.
+ # * antisymmetric tapers should begin with a positive lobe
+ fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
+ for i, f in enumerate(fix_symmetric):
+ if f:
+ dpss[2 * i] *= -1
+ # rather than test the sign of one point, test the sign of the
+ # linear slope up to the first (largest) peak
+ pk = np.argmax( np.abs(dpss[1::2, :N/2]), axis=1 )
+ for i, p in enumerate(pk):
+ if np.sum(dpss[2 * i + 1, :p]) < 0:
+ dpss[2 * i + 1] *= -1
+
+ # Now find the eigenvalues of the original spectral concentration problem
+ # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
+ dpss_rxx = utils.autocorr(dpss) * N
+ r = 4 * W * np.sinc(2 * W * nidx)
+ r[0] = 2 * W
+ eigvals = np.dot(dpss_rxx, r)
+
+ return dpss, eigvals
+
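+
+# Illustrative sketch (the _example_* helper and its parameters are
+# hypothetical, not part of the nitime API): dpss_windows returns Kmax tapers
+# of length N together with their spectral concentration eigenvalues; the
+# tapers form an (approximately) orthonormal set of rows.
+def _example_dpss_windows():
+    import numpy as np
+    N, NW, Kmax = 1024, 4, 7   # hypothetical parameters
+    tapers, eigvals = dpss_windows(N, NW, Kmax)
+    # tapers.shape == (Kmax, N); eigvals near 1 indicate good concentration
+    gram = np.dot(tapers, tapers.T)
+    return tapers.shape, eigvals, np.allclose(gram, np.eye(Kmax), atol=1e-6)
+
+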
+def tapered_spectra(s, tapers, NFFT=None, low_bias=True):
+ """
+ Compute the tapered spectra of the rows of s.
+
+ Parameters
+ ----------
+
+ s : ndarray, (n_arr, n_pts)
+ An array whose rows are timeseries.
+
+ tapers : ndarray or container
+ Either the precomputed DPSS tapers, or the pair of parameters
+ (NW, K) needed to compute K tapers of length n_pts.
+
+ NFFT : int
+ Number of FFT bins to compute
+
+    low_bias : Boolean
+        If the DPSS tapers are computed here, automatically select only the
+        tapers with > 90% energy concentration.
+
+ Returns
+ -------
+
+ t_spectra : ndarray, shaped (n_arr, K, NFFT)
+ The FFT of the tapered sequences in s. First dimension is squeezed
+ out if n_arr is 1.
+ eigvals : ndarray
+ The eigenvalues are also returned if DPSS are calculated here.
+
+ """
+ N = s.shape[-1]
+ # XXX: don't allow NFFT < N -- not every implementation is so restrictive!
+ if NFFT is None or NFFT < N:
+ NFFT = N
+ rest_of_dims = s.shape[:-1]
+ M = int(np.product(rest_of_dims))
+
+    s = s.reshape(M, N)
+ # de-mean this sucker
+ s = utils.remove_bias(s, axis=-1)
+
+ if not isinstance(tapers, np.ndarray):
+ # then tapers is (NW, K)
+ args = (N,) + tuple(tapers)
+ dpss, eigvals = dpss_windows(*args)
+ if low_bias:
+ keepers = (eigvals > 0.9)
+ dpss = dpss[keepers]
+ eigvals = eigvals[keepers]
+ tapers = dpss
+ else:
+ eigvals = None
+ K = tapers.shape[0]
+ sig_sl = [slice(None)] * len(s.shape)
+ sig_sl.insert(len(s.shape) - 1, np.newaxis)
+
+ # tapered.shape is (M, Kmax, N)
+ tapered = s[sig_sl] * tapers
+
+ # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking
+ # results of real-valued FFT eats up memory
+ t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)
+ t_spectra.shape = rest_of_dims + (K, NFFT)
+ if eigvals is None:
+ return t_spectra
+ return t_spectra, eigvals
+
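+
+# Illustrative sketch (the _example_* helper is hypothetical, not part of the
+# nitime API): tapered_spectra multiplies each (de-meaned) row of s by every
+# taper and FFTs the result, producing one spectrum per (signal, taper) pair.
+def _example_tapered_spectra():
+    import numpy as np
+    s = np.random.randn(3, 256)
+    spectra, eigvals = tapered_spectra(s, (4, 8))  # NW=4, K=8 requested
+    # low_bias=True may drop poorly concentrated tapers, so the shape is
+    # (3, len(eigvals), 256)
+    return spectra.shape, eigvals
+
+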
+def mtm_cross_spectrum(tx, ty, weights, sides='twosided'):
+ r"""
+
+ The cross-spectrum between two tapered time-series, derived from a
+ multi-taper spectral estimation.
+
+ Parameters
+ ----------
+
+ tx, ty : ndarray (K, ..., N)
+        The complex DFTs of the tapered sequences
+
+ weights : ndarray, or 2-tuple or list
+ Weights can be specified as a length-2 list of weights for spectra tx
+ and ty respectively. Alternatively, if tx is ty and this function is
+ computing the spectral density function of a single sequence, the
+ weights can be given as an ndarray of weights for the spectrum.
+ Weights may be
+
+ * scalars, if the shape of the array is (K, ..., 1)
+ * vectors, with the shape of the array being the same as tx or ty
+
+ sides : str in {'onesided', 'twosided'}
+ For the symmetric spectra of a real sequence, optionally combine half
+ of the frequencies and scale the duplicate frequencies in the range
+ (0, F_nyquist).
+
+ Notes
+ -----
+
+    Spectral densities are always computed as:
+
+ :math:`S_{xy}^{mt}(f) = \frac{\sum_k
+ [d_k^x(f)s_k^x(f)][d_k^y(f)(s_k^y(f))^{*}]}{[\sum_k
+ d_k^x(f)^2]^{\frac{1}{2}}[\sum_k d_k^y(f)^2]^{\frac{1}{2}}}`
+
+ """
+ N = tx.shape[-1]
+ if ty.shape != tx.shape:
+ raise ValueError('shape mismatch between tx, ty')
+
+ # pshape = list(tx.shape)
+
+ if isinstance(weights, (list, tuple)):
+ autospectrum = False
+ weights_x = weights[0]
+ weights_y = weights[1]
+ denom = (np.abs(weights_x) ** 2).sum(axis=0) ** 0.5
+ denom *= (np.abs(weights_y) ** 2).sum(axis=0) ** 0.5
+ else:
+ autospectrum = True
+ weights_x = weights
+ weights_y = weights
+ denom = (np.abs(weights) ** 2).sum(axis=0)
+
+ if sides == 'onesided':
+ # where the nyq freq should be
+ Fn = N / 2 + 1
+ truncated_slice = [slice(None)] * len(tx.shape)
+ truncated_slice[-1] = slice(0, Fn)
+ tsl = tuple(truncated_slice)
+ tx = tx[tsl]
+ ty = ty[tsl]
+ # if weights.shape[-1] > 1 then make sure weights are truncated too
+ if weights_x.shape[-1] > 1:
+ weights_x = weights_x[tsl]
+ weights_y = weights_y[tsl]
+ denom = denom[tsl[1:]]
+
+ sf = weights_x * tx
+ sf *= (weights_y * ty).conj()
+ sf = sf.sum(axis=0)
+ sf /= denom
+
+ if sides == 'onesided':
+ # dbl power at duplicated freqs
+ Fl = (N + 1) / 2
+ sub_slice = [slice(None)] * len(sf.shape)
+ sub_slice[-1] = slice(1, Fl)
+ sf[tuple(sub_slice)] *= 2
+
+ if autospectrum:
+ return sf.real
+ return sf
+
+
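+# Illustrative sketch (the _example_* helper is hypothetical, not part of the
+# nitime API; it relies on this module's fftpack import): a single-channel
+# multi-taper auto-spectrum with sqrt-eigenvalue weights, with the taper axis
+# first as expected above.
+def _example_mtm_autospectrum():
+    import numpy as np
+    x = np.random.randn(1024)
+    tapers, eigvals = dpss_windows(1024, 4, 7)
+    tspectra = fftpack.fft(tapers * x)           # shape (K, N)
+    weights = np.sqrt(eigvals)[:, np.newaxis]    # one scalar weight per taper
+    sxx = mtm_cross_spectrum(tspectra, tspectra, weights, sides='onesided')
+    return sxx                                   # real-valued, length N/2 + 1
+
+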
+def multi_taper_psd(
+ s, Fs=2 * np.pi, NW=None, BW=None, adaptive=False,
+ jackknife=True, low_bias=True, sides='default', NFFT=None
+ ):
+    """Returns an estimate of the PSD function of s using the multitaper
+    method. If neither the NW product nor the BW and Fs in Hz are specified
+    by the user, a bandwidth of 4 times the fundamental frequency,
+    corresponding to NW = 4, will be used.
+
+ Parameters
+ ----------
+ s : ndarray
+ An array of sampled random processes, where the time axis is assumed to
+ be on the last axis
+
+ Fs : float
+ Sampling rate of the signal
+
+ NW : float
+ The normalized half-bandwidth of the data tapers, indicating a
+ multiple of the fundamental frequency of the DFT (Fs/N).
+        Common choices are n/2, for n >= 4. This parameter is unitless,
+        matching the MATLAB convention. As an alternative, set the BW
+        parameter in Hz. See Notes on bandwidth.
+
+ BW : float
+ The sampling-relative bandwidth of the data tapers, in Hz.
+
+ adaptive : {True/False}
+ Use an adaptive weighting routine to combine the PSD estimates of
+ different tapers.
+
+ jackknife : {True/False}
+ Use the jackknife method to make an estimate of the PSD variance
+ at each point.
+
+ low_bias : {True/False}
+ Rather than use 2NW tapers, only use the tapers that have better than
+ 90% spectral concentration within the bandwidth (still using
+ a maximum of 2NW tapers)
+
+    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
+        This determines which sides of the spectrum to return.
+        For complex-valued inputs, the default is two-sided; for real-valued
+        inputs, the default is one-sided.
+
+ Returns
+ -------
+ (freqs, psd_est, var_or_nu) : ndarrays
+ The first two arrays are the frequency points vector and the
+ estimated PSD. The last returned array differs depending on whether
+ the jackknife was used. It is either
+
+ * The jackknife estimated variance of the log-psd, OR
+ * The degrees of freedom in a chi2 model of how the estimated
+ PSD is distributed about the true log-PSD (this is either
+ 2*floor(2*NW), or calculated from adaptive weights)
+
+ Notes
+ -----
+
+    The bandwidth of the windowing function will determine the number of
+    tapers to use. This parameter represents a trade-off between frequency
+    resolution (lower main lobe BW for the taper) and variance reduction
+    (higher BW and number of averaged estimates). Typically, the number of
+    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
+    ratio, as these eigenfunctions have the best energy concentration.
+
+ """
+ # have last axis be time series for now
+ N = s.shape[-1]
+ M = int(np.product(s.shape[:-1]))
+
+ if BW is not None:
+ # BW wins in a contest (since it was the original implementation)
+ norm_BW = np.round(BW * N / Fs)
+ NW = norm_BW / 2.0
+ elif NW is None:
+ # default NW
+ NW = 4
+ # (else BW is None and NW is not None) ... all set
+ Kmax = int(2 * NW)
+
+ # if the time series is a complex vector, a one sided PSD is invalid:
+ if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
+ sides = 'twosided'
+ elif sides in ('default', 'onesided'):
+ sides = 'onesided'
+
+ # Find the direct spectral estimators S_k(f) for k tapered signals..
+ # don't normalize the periodograms by 1/N as normal.. since the taper
+ # windows are orthonormal, they effectively scale the signal by 1/N
+ spectra, eigvals = tapered_spectra(
+ s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
+ )
+ NFFT = spectra.shape[-1]
+ K = len(eigvals)
+ # collapse spectra's shape back down to 3 dimensions
+ spectra.shape = (M, K, NFFT)
+
+ last_freq = NFFT / 2 + 1 if sides == 'onesided' else NFFT
+
+ # degrees of freedom at each timeseries, at each freq
+ nu = np.empty((M, last_freq))
+ if adaptive:
+ weights = np.empty((M, K, last_freq))
+ for i in range(M):
+ weights[i], nu[i] = utils.adaptive_weights(
+ spectra[i], eigvals, sides=sides
+ )
+ else:
+ # let the weights simply be the square-root of the eigenvalues.
+ # repeat these values across all n_chan channels of data
+ weights = np.tile(np.sqrt(eigvals), M).reshape(M, K, 1)
+ nu.fill(2 * K)
+
+ if jackknife:
+ jk_var = np.empty_like(nu)
+ for i in range(M):
+ jk_var[i] = utils.jackknifed_sdf_variance(
+ spectra[i], eigvals, sides=sides, adaptive=adaptive
+ )
+
+ # Compute the unbiased spectral estimator for S(f) as the sum of
+ # the S_k(f) weighted by the function w_k(f)**2, all divided by the
+ # sum of the w_k(f)**2 over k
+
+ # 1st, roll the tapers axis forward
+ spectra = np.rollaxis(spectra, 1, start=0)
+ weights = np.rollaxis(weights, 1, start=0)
+ sdf_est = mtm_cross_spectrum(
+ spectra, spectra, weights, sides=sides
+ )
+ sdf_est /= Fs
+
+ if sides == 'onesided':
+ freqs = np.linspace(0, Fs / 2, NFFT / 2 + 1)
+ else:
+ freqs = np.linspace(0, Fs, NFFT, endpoint=False)
+
+ out_shape = s.shape[:-1] + (len(freqs),)
+ sdf_est.shape = out_shape
+ if jackknife:
+ jk_var.shape = out_shape
+ return freqs, sdf_est, jk_var
+ else:
+ nu.shape = out_shape
+ return freqs, sdf_est, nu
+
+
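+# Illustrative sketch (the _example_* helper is hypothetical, not part of the
+# nitime API): typical use on a real-valued signal. The third return value
+# holds the chi-squared degrees of freedom when jackknife=False, or the
+# jackknife variance estimate when jackknife=True.
+def _example_multi_taper_psd():
+    import numpy as np
+    x = np.random.randn(1024)
+    freqs, psd, nu = multi_taper_psd(x, Fs=2 * np.pi, NW=4, adaptive=False,
+                                     jackknife=False)
+    # one-sided output for a real signal: 1024 / 2 + 1 frequency bins
+    return freqs.shape, psd.shape, nu.shape
+
+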
+def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,
+ adaptive=False, sides='default', NFFT=None):
+    """Returns an estimate of the Cross Spectral Density (CSD) function
+    between all (N choose 2) pairs of timeseries in s, using the multitaper
+    method. If neither the NW product nor the BW and Fs in Hz are specified
+    by the user, a bandwidth of 4 times the fundamental frequency,
+    corresponding to NW = 4, will be used.
+
+ Parameters
+ ----------
+ s : ndarray
+ An array of sampled random processes, where the time axis is
+ assumed to be on the last axis. If ndim > 2, the number of time
+ series to compare will still be taken as prod(s.shape[:-1])
+
+    Fs : float
+        Sampling rate of the signal
+
+ NW : float
+ The normalized half-bandwidth of the data tapers, indicating a
+ multiple of the fundamental frequency of the DFT (Fs/N).
+        Common choices are n/2, for n >= 4. This parameter is unitless,
+        matching the MATLAB convention. As an alternative, set the BW
+        parameter in Hz. See Notes on bandwidth.
+
+ BW : float
+ The sampling-relative bandwidth of the data tapers, in Hz.
+
+ adaptive : {True, False}
+ Use adaptive weighting to combine spectra
+
+ low_bias : {True, False}
+ Rather than use 2NW tapers, only use the tapers that have better than
+ 90% spectral concentration within the bandwidth (still using
+ a maximum of 2NW tapers)
+
+    sides : str (optional)   [ 'default' | 'onesided' | 'twosided' ]
+        This determines which sides of the spectrum to return. For
+        complex-valued inputs, the default is two-sided; for real-valued
+        inputs, the default is one-sided.
+
+ Returns
+ -------
+    (freqs, csd_est) : ndarrays
+        The frequency points vector and the estimated CSD.
+        The CSD{i,j}(f) are returned in a square "matrix" of vectors
+        holding Sij(f). For an input array of (M, N), the output is (M, M, N).
+
+ Notes
+ -----
+
+    The bandwidth of the windowing function will determine the number of
+    tapers to use. This parameter represents a trade-off between frequency
+    resolution (lower main lobe BW for the taper) and variance reduction
+    (higher BW and number of averaged estimates). Typically, the number of
+    tapers is calculated as 2x the bandwidth-to-fundamental-frequency
+    ratio, as these eigenfunctions have the best energy concentration.
+
+ """
+ # have last axis be time series for now
+ N = s.shape[-1]
+ M = int(np.product(s.shape[:-1]))
+
+ if BW is not None:
+ # BW wins in a contest (since it was the original implementation)
+ norm_BW = np.round(BW * N / Fs)
+ NW = norm_BW / 2.0
+ elif NW is None:
+ # default NW
+ NW = 4
+ # (else BW is None and NW is not None) ... all set
+ Kmax = int(2 * NW)
+
+ # if the time series is a complex vector, a one sided PSD is invalid:
+ if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
+ sides = 'twosided'
+ elif sides in ('default', 'onesided'):
+ sides = 'onesided'
+
+ # Find the direct spectral estimators S_k(f) for k tapered signals..
+ # don't normalize the periodograms by 1/N as normal.. since the taper
+ # windows are orthonormal, they effectively scale the signal by 1/N
+ spectra, eigvals = tapered_spectra(
+ s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
+ )
+ NFFT = spectra.shape[-1]
+ K = len(eigvals)
+ # collapse spectra's shape back down to 3 dimensions
+ spectra.shape = (M, K, NFFT)
+
+ # compute the cross-spectral density functions
+ last_freq = NFFT / 2 + 1 if sides == 'onesided' else NFFT
+
+ if adaptive:
+ w = np.empty((M, K, last_freq))
+ nu = np.empty((M, last_freq))
+ for i in range(M):
+ w[i], nu[i] = utils.adaptive_weights(
+ spectra[i], eigvals, sides=sides
+ )
+ else:
+ weights = np.sqrt(eigvals).reshape(K, 1)
+
+ csd_pairs = np.zeros((M, M, last_freq), 'D')
+ for i in range(M):
+ if adaptive:
+ wi = w[i]
+ else:
+ wi = weights
+ for j in range(i + 1):
+ if adaptive:
+ wj = w[j]
+ else:
+ wj = weights
+ ti = spectra[i]
+ tj = spectra[j]
+ csd_pairs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)
+
+ csdfs = csd_pairs.transpose(1,0,2).conj()
+ csdfs += csd_pairs
+ diag_idc = (np.arange(M), np.arange(M))
+ csdfs[diag_idc] /= 2
+ csdfs /= Fs
+
+ if sides == 'onesided':
+ freqs = np.linspace(0, Fs / 2, NFFT / 2 + 1)
+ else:
+ freqs = np.linspace(0, Fs, NFFT, endpoint=False)
+
+ return freqs, csdfs
+
+
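+# Illustrative sketch (the _example_* helper is hypothetical, not part of the
+# nitime API): multi_taper_csd returns a Hermitian (M, M, n_freqs) matrix of
+# cross-spectra whose diagonal matches the corresponding real-valued
+# multi-taper PSDs.
+def _example_multi_taper_csd():
+    import numpy as np
+    s = np.random.randn(4, 256)
+    freqs, csd = multi_taper_csd(s, Fs=2 * np.pi, adaptive=False)
+    # csd.shape == (4, 4, len(freqs)) and csd[i, j] == conj(csd[j, i])
+    return freqs, csd
+
+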
+def freq_response(b, a=1., n_freqs=1024, sides='onesided'):
+ """
+ Returns the frequency response of the IIR or FIR filter described
+ by beta and alpha coefficients.
+
+ Parameters
+ ----------
+
+ b : beta sequence (moving average component)
+ a : alpha sequence (autoregressive component)
+ n_freqs : size of frequency grid
+    sides : {'onesided', 'twosided'}
+        compute the response on the frequency grid [0, PI] ('onesided') or
+        [0, 2*PI) ('twosided')
+
+ Returns
+ -------
+
+ fgrid, H(e^jw)
+
+ Notes
+ -----
+ For a description of the linear constant-coefficient difference equation,
+ see
+ http://en.wikipedia.org/wiki/Z-transform
+ """
+ # transitioning to scipy freqz
+ real_n = n_freqs // 2 + 1 if sides == 'onesided' else n_freqs
+ return sig.freqz(b, a=a, worN=real_n, whole=sides != 'onesided')
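+
+
+# Illustrative sketch (the _example_* helper is hypothetical, not part of the
+# nitime API): freq_response wraps scipy.signal.freqz; for instance, a 5-point
+# moving-average FIR filter evaluated on the one-sided grid [0, pi].
+def _example_freq_response():
+    import numpy as np
+    b = np.ones(5) / 5.0   # FIR (moving-average) coefficients
+    w, h = freq_response(b, a=1., n_freqs=1024, sides='onesided')
+    # |h| is 1 at DC and attenuates the higher frequencies
+    return w, np.abs(h)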
diff --git a/nitime/algorithms/tests/__init__.py b/nitime/algorithms/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/nitime/algorithms/tests/__init__.py
diff --git a/nitime/algorithms/tests/test_autoregressive.py b/nitime/algorithms/tests/test_autoregressive.py
new file mode 100644
index 0000000..3eb2027
--- /dev/null
+++ b/nitime/algorithms/tests/test_autoregressive.py
@@ -0,0 +1,216 @@
+import numpy as np
+import numpy.testing as npt
+import numpy.testing.decorators as dec
+
+import nitime.algorithms as tsa
+import nitime.utils as utils
+
+# Set the random seed:
+np.random.seed(1)
+
+
+def _random_poles(half_poles=3):
+ poles_rp = np.random.rand(half_poles * 20)
+ poles_ip = np.random.rand(half_poles * 20)
+
+ # get real/imag parts of some poles such that magnitudes bounded
+ # away from 1
+ stable_pole_idx = np.where(poles_rp ** 2 + poles_ip ** 2 < .75 ** 2)[0]
+    # keep half_poles of these, and supplement with their complex conjugates
+ stable_poles = poles_rp[stable_pole_idx[:half_poles]] + \
+ 1j * poles_ip[stable_pole_idx[:half_poles]]
+ stable_poles = np.r_[stable_poles, stable_poles.conj()]
+ # we have the roots, now find the polynomial
+ ak = np.poly(stable_poles)
+ return ak
+
+
+def test_AR_est_consistency():
+ order = 10 # some even number
+ ak = _random_poles(order / 2)
+ x, v, _ = utils.ar_generator(N=512, coefs=-ak[1:], drop_transients=100)
+ ak_yw, ssq_yw = tsa.AR_est_YW(x, order)
+ ak_ld, ssq_ld = tsa.AR_est_LD(x, order)
+ npt.assert_almost_equal(ak_yw, ak_ld)
+ npt.assert_almost_equal(ssq_yw, ssq_ld)
+
+
+def test_AR_YW():
+ arsig, _, _ = utils.ar_generator(N=512)
+ avg_pwr = (arsig * arsig.conjugate()).mean()
+ order = 8
+ ak, sigma_v = tsa.AR_est_YW(arsig, order)
+ w, psd = tsa.AR_psd(ak, sigma_v)
+ # the psd is a one-sided power spectral density, which has been
+ # multiplied by 2 to preserve the property that
+ # 1/2pi int_{-pi}^{pi} Sxx(w) dw = Rxx(0)
+
+ # evaluate this integral numerically from 0 to pi
+ dw = np.pi / len(psd)
+ avg_pwr_est = np.trapz(psd, dx=dw) / (2 * np.pi)
+ # consistency on the order of 10**0 is pretty good for this test
+ npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)
+
+ # Test for providing the autocovariance as an input:
+ ak, sigma_v = tsa.AR_est_YW(arsig, order, utils.autocov(arsig))
+ w, psd = tsa.AR_psd(ak, sigma_v)
+ avg_pwr_est = np.trapz(psd, dx=dw) / (2 * np.pi)
+ npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)
+
+
+def test_AR_LD():
+ """
+
+    Test the Levinson-Durbin estimate of the AR coefficients against the
+    expected PSD
+
+ """
+ arsig, _, _ = utils.ar_generator(N=512)
+ avg_pwr = (arsig * arsig.conjugate()).real.mean()
+ order = 8
+ ak, sigma_v = tsa.AR_est_LD(arsig, order)
+ w, psd = tsa.AR_psd(ak, sigma_v)
+
+ # the psd is a one-sided power spectral density, which has been
+ # multiplied by 2 to preserve the property that
+ # 1/2pi int_{-pi}^{pi} Sxx(w) dw = Rxx(0)
+
+ # evaluate this integral numerically from 0 to pi
+ dw = np.pi / len(psd)
+ avg_pwr_est = np.trapz(psd, dx=dw) / (2 * np.pi)
+ npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)
+
+ # Test for providing the autocovariance as an input:
+ ak, sigma_v = tsa.AR_est_LD(arsig, order, utils.autocov(arsig))
+ w, psd = tsa.AR_psd(ak, sigma_v)
+ avg_pwr_est = np.trapz(psd, dx=dw) / (2 * np.pi)
+ npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)
+
+
+@dec.slow
+def test_MAR_est_LWR():
+ """
+
+ Test the LWR MAR estimator against the power of the signal
+
+ This also tests the functions: transfer_function_xy, spectral_matrix_xy,
+ coherence_from_spectral and granger_causality_xy
+
+ """
+
+ # This is the same processes as those in doc/examples/ar_est_2vars.py:
+ a1 = np.array([[0.9, 0],
+ [0.16, 0.8]])
+
+ a2 = np.array([[-0.5, 0],
+ [-0.2, -0.5]])
+
+ am = np.array([-a1, -a2])
+
+ x_var = 1
+ y_var = 0.7
+ xy_cov = 0.4
+ cov = np.array([[x_var, xy_cov],
+ [xy_cov, y_var]])
+
+ n_freqs = 1024
+ w, Hw = tsa.transfer_function_xy(am, n_freqs=n_freqs)
+ Sw = tsa.spectral_matrix_xy(Hw, cov)
+
+ # This many realizations of the process:
+ N = 500
+ # Each one this long
+ L = 1024
+
+ order = am.shape[0]
+ n_lags = order + 1
+
+ n_process = am.shape[-1]
+
+ z = np.empty((N, n_process, L))
+ nz = np.empty((N, n_process, L))
+
+ for i in range(N):
+ z[i], nz[i] = utils.generate_mar(am, cov, L)
+
+ a_est = []
+ cov_est = []
+
+ # This loop runs MAR_est_LWR:
+ for i in range(N):
+ Rxx = (tsa.MAR_est_LWR(z[i], order=n_lags))
+ a_est.append(Rxx[0])
+ cov_est.append(Rxx[1])
+
+ a_est = np.mean(a_est, 0)
+ cov_est = np.mean(cov_est, 0)
+
+ # This tests transfer_function_xy and spectral_matrix_xy:
+ w, Hw_est = tsa.transfer_function_xy(a_est, n_freqs=n_freqs)
+ Sw_est = tsa.spectral_matrix_xy(Hw_est, cov_est)
+
+ # coherence_from_spectral:
+ c = tsa.coherence_from_spectral(Sw)
+ c_est = tsa.coherence_from_spectral(Sw_est)
+
+ # granger_causality_xy:
+
+ w, f_x2y, f_y2x, f_xy, Sw = tsa.granger_causality_xy(am,
+ cov,
+ n_freqs=n_freqs)
+
+ w, f_x2y_est, f_y2x_est, f_xy_est, Sw_est = tsa.granger_causality_xy(a_est,
+ cov_est,
+ n_freqs=n_freqs)
+
+ # interdependence_xy
+ i_xy = tsa.interdependence_xy(Sw)
+ i_xy_est = tsa.interdependence_xy(Sw_est)
+
+ # This is all very approximate:
+ npt.assert_almost_equal(Hw, Hw_est, decimal=1)
+ npt.assert_almost_equal(Sw, Sw_est, decimal=1)
+ npt.assert_almost_equal(c, c_est, 1)
+ npt.assert_almost_equal(f_xy, f_xy_est, 1)
+ npt.assert_almost_equal(f_x2y, f_x2y_est, 1)
+ npt.assert_almost_equal(f_y2x, f_y2x_est, 1)
+ npt.assert_almost_equal(i_xy, i_xy_est, 1)
+
+
+def test_lwr():
+ "test solution of lwr recursion"
+ for trial in range(3):
+ nc = np.random.randint(2, high=10)
+ P = np.random.randint(2, high=6)
+ # nc is channels, P is lags (order)
+ r = np.random.randn(P + 1, nc, nc)
+ r[0] = np.dot(r[0], r[0].T) # force r0 to be symmetric
+
+ a, Va = tsa.lwr_recursion(r)
+ # Verify the "orthogonality" principle of the mAR system
+ # Set up a system in blocks to compute, for each k
+ # sum_{i=1}^{P} A(i)R(k-i) = -R(k) k > 0
+ # = sum_{i=1}^{P} R(k-i)^T A(i)^T = -R(k)^T
+ # = sum_{i=1}^{P} R(i-k)A(i)^T = -R(k)^T
+ rmat = np.zeros((nc * P, nc * P))
+ for k in range(1, P + 1):
+ for i in range(1, P + 1):
+ im = i - k
+ if im < 0:
+ r1 = r[-im].T
+ else:
+ r1 = r[im]
+ rmat[(k - 1) * nc:k * nc, (i - 1) * nc:i * nc] = r1
+
+ rvec = np.zeros((nc * P, nc))
+ avec = np.zeros((nc * P, nc))
+ for m in range(P):
+ rvec[m * nc:(m + 1) * nc] = -r[m + 1].T
+ avec[m * nc:(m + 1) * nc] = a[m].T
+
+ l2_d = np.dot(rmat, avec) - rvec
+ l2_d = (l2_d ** 2).sum() ** 0.5
+ l2_r = (rvec ** 2).sum() ** 0.5
+
+ # compute |Ax-b| / |b| metric
+ npt.assert_almost_equal(l2_d / l2_r, 0, decimal=5)
diff --git a/nitime/algorithms/tests/test_coherence.py b/nitime/algorithms/tests/test_coherence.py
new file mode 100644
index 0000000..2cb325f
--- /dev/null
+++ b/nitime/algorithms/tests/test_coherence.py
@@ -0,0 +1,395 @@
+"""
+
+Tests of functions under algorithms.coherence
+
+"""
+import nose
+import os
+import warnings
+
+import numpy as np
+import numpy.testing as npt
+from scipy.signal import signaltools
+
+try:
+ import matplotlib
+ import matplotlib.mlab as mlab
+ has_mpl = True
+ # Matplotlib older than 0.99 will have some issues with the normalization
+ # of t:
+ if float(matplotlib.__version__[:3]) < 0.99:
+ w_s = "You have a relatively old version of Matplotlib. "
+ w_s += " Estimation of the PSD DC component might not be as expected."
+        w_s += " Consider updating Matplotlib: http://matplotlib.sourceforge.net/"
+ warnings.warn(w_s, Warning)
+ old_mpl = True
+ else:
+ old_mpl = False
+
+except ImportError:
+ raise nose.SkipTest()
+
+from scipy import fftpack
+
+import nitime
+import nitime.algorithms as tsa
+import nitime.utils as utils
+
+#Define globally
+test_dir_path = os.path.join(nitime.__path__[0], 'tests')
+
+# Define these once globally:
+t = np.linspace(0, 16 * np.pi, 1024)
+x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])
+y = x + np.random.rand(t.shape[-1])
+
+tseries = np.vstack([x, y])
+
+methods = [None,
+ {"this_method": 'multi_taper_csd', "Fs": 2 * np.pi},
+ {"this_method": 'periodogram_csd', "Fs": 2 * np.pi, "NFFT": 256}]
+
+if has_mpl:
+ methods.append({"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi,
+ "window": mlab.window_hanning(np.ones(256))})
+ methods.append({"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi})
+
+def test_coherency():
+ """
+ Tests that the coherency algorithm runs smoothly, using the different
+ csd routines, that the resulting matrix is symmetric and for the welch
+ method, that the frequency bands in the output make sense
+ """
+
+ for method in methods:
+ f, c = tsa.coherency(tseries, csd_method=method)
+
+ npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())
+ npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))
+
+ if method is not None and method['this_method'] != "multi_taper_csd":
+ f_theoretical = utils.get_freqs(method['Fs'], method['NFFT'])
+ npt.assert_array_almost_equal(f, f_theoretical)
+ npt.assert_array_almost_equal(f, f_theoretical)
+
+
+def test_coherence():
+ """
+ Tests that the coherency algorithm runs smoothly, using the different csd
+ routines and that the result is symmetrical:
+ """
+
+ for method in methods:
+ f, c = tsa.coherence(tseries, csd_method=method)
+ npt.assert_array_almost_equal(c[0, 1], c[1, 0])
+ npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))
+
+
+def test_coherency_regularized():
+ """
+ Tests that the regularized coherency algorithm runs smoothly, using the
+ different csd routines and that the result is symmetrical:
+ """
+
+ for method in methods:
+ f, c = tsa.coherency_regularized(tseries, 0.05, 1000,
+ csd_method=method)
+ npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())
+
+
+def test_coherence_regularized():
+ """
+
+ Tests that the regularized coherence algorithm runs smoothly, using the
+ different csd routines and that the result is symmetrical:
+
+ """
+ for method in methods:
+ f, c = tsa.coherence_regularized(tseries, 0.05, 1000,
+ csd_method=method)
+ npt.assert_array_almost_equal(c[0, 1], c[1, 0])
+
+
+# Define as global for the following functions:
+
+def test_coherency_bavg():
+ ub = [np.pi / 2, None]
+ lb = [0, 0.2]
+ for method in methods:
+ for this_lb in lb:
+ for this_ub in ub:
+ c = tsa.coherency_bavg(tseries, lb=this_lb, ub=this_ub,
+ csd_method=method)
+
+ #Test that this gets rid of the frequency axis:
+ npt.assert_equal(len(c.shape), 2)
+ # And that the result is equal
+ npt.assert_almost_equal(c[0, 1], c[1, 0].conjugate())
+
+
+def test_coherence_bavg():
+ ub = [np.pi / 2, None]
+ lb = [0, 0.2]
+ for method in methods:
+ for this_lb in lb:
+ for this_ub in ub:
+ c = tsa.coherence_bavg(tseries, lb=this_lb, ub=this_ub,
+ csd_method=method)
+
+ #Test that this gets rid of the frequency axis:
+ npt.assert_equal(len(c.shape), 2)
+ # And that the result is equal
+ npt.assert_almost_equal(c[0, 1], c[1, 0].conjugate())
+
+
+# XXX FIXME: This doesn't work for the periodogram method:
+def test_coherence_partial():
+ """ Test partial coherence"""
+
+ x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])
+ y = x + np.random.rand(t.shape[-1])
+ z = y + np.random.rand(t.shape[-1])
+
+ for method in methods:
+ if (method is None) or method['this_method'] == 'welch':
+ f, c = tsa.coherence_partial(np.vstack([x, y]), z,
+ csd_method=method)
+ npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())
+
+
+def test_coherence_phase_delay():
+ """
+
+ Test the phase spectrum calculation
+
+ """
+
+ # Set up two time-series with a known phase delay:
+ nz = np.random.rand(t.shape[-1])
+ x = np.sin(t) + nz
+ y = np.sin(t + np.pi) + nz
+
+ tseries = np.vstack([x, y])
+ for method in methods:
+ f1, pdelay = tsa.coherency_phase_spectrum(tseries, csd_method=method)
+ f2, tdelay = tsa.coherency_phase_delay(tseries, csd_method=method)
+ npt.assert_almost_equal(pdelay[0, 1], -pdelay[1, 0])
+ npt.assert_almost_equal(tdelay[0, 1], -tdelay[1, 0])
+ # This is the relationship between these two quantities:
+ npt.assert_almost_equal(tdelay[0, 1],
+ pdelay[0, 1][1:] / (2 * np.pi * f2))
+
+
+def test_coherency_cached():
+ """Tests that the cached coherency gives the same result as the standard
+ coherency"""
+
+ f1, c1 = tsa.coherency(tseries)
+
+ ij = [(0, 1), (1, 0)]
+ f2, cache = tsa.cache_fft(tseries, ij)
+
+ c2 = tsa.cache_to_coherency(cache, ij)
+
+ npt.assert_array_almost_equal(c1[1, 0], c2[1, 0])
+ npt.assert_array_almost_equal(c1[0, 1], c2[0, 1])
+
+
+def test_correlation_spectrum():
+ """
+
+ Test the correlation spectrum method
+
+ """
+ # Smoke-test for now - unclear what to test here...
+ f, c = tsa.correlation_spectrum(x, y, norm=True)
+
+
+# XXX FIXME: http://github.com/nipy/nitime/issues/issue/1
+@npt.dec.skipif(True)
+def test_coherence_linear_dependence():
+ """
+ Tests that the coherence between two linearly dependent time-series
+ behaves as expected.
+
+ From William Wei's book, according to eq. 14.5.34, if two time-series are
+ linearly related through:
+
+ y(t) = alpha*x(t+time_shift)
+
+ then the coherence between them should be equal to:
+
+ .. :math:
+
+ C(\nu) = \frac{1}{1+\frac{fft_{noise}(\nu)}{fft_{x}(\nu) \cdot \alpha^2}}
+
+ """
+ t = np.linspace(0, 16 * np.pi, 2 ** 14)
+ x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + \
+ 0.1 * np.random.rand(t.shape[-1])
+ N = x.shape[-1]
+
+ alpha = 10
+ m = 3
+ noise = 0.1 * np.random.randn(t.shape[-1])
+ y = alpha * np.roll(x, m) + noise
+
+ f_noise = fftpack.fft(noise)[0:N / 2]
+ f_x = fftpack.fft(x)[0:N / 2]
+
+ c_t = (1 / (1 + (f_noise / (f_x * (alpha ** 2)))))
+
+ method = {"this_method": 'welch',
+ "NFFT": 2048,
+ "Fs": 2 * np.pi}
+
+ f, c = tsa.coherence(np.vstack([x, y]), csd_method=method)
+ c_t = np.abs(signaltools.resample(c_t, c.shape[-1]))
+
+ npt.assert_array_almost_equal(c[0, 1], c_t, 2)
+
+
+def test_coherence_matlab():
+
+ """ Test against coherence values calculated with matlab's mscohere"""
+
+ ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))
+
+ ts0 = ts[1]
+ ts1 = ts[0]
+
+ method = {}
+ method['this_method'] = 'welch'
+ method['NFFT'] = 64
+ method['Fs'] = 1.0
+ method['noverlap'] = method['NFFT'] / 2
+
+ ttt = np.vstack([ts0, ts1])
+ f, cxy_mlab = tsa.coherence(ttt, csd_method=method)
+ cxy_matlab = np.loadtxt(os.path.join(test_dir_path, 'cxy_matlab.txt'))
+
+ npt.assert_almost_equal(cxy_mlab[0][1], cxy_matlab, decimal=5)
+
+@npt.dec.skipif(old_mpl)
+def test_cached_coherence():
+ """Testing the cached coherence functions """
+ NFFT = 64 # This is the default behavior
+ n_freqs = NFFT // 2 + 1
+ ij = [(0, 1), (1, 0)]
+ ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))
+ freqs, cache = tsa.cache_fft(ts, ij)
+
+ # Are the frequencies the right ones?
+ npt.assert_equal(freqs, utils.get_freqs(2 * np.pi, NFFT))
+
+ # Check that the fft of the first window is what we expect:
+ hann = mlab.window_hanning(np.ones(NFFT))
+ w_ts = ts[0][:NFFT] * hann
+ w_ft = fftpack.fft(w_ts)[0:n_freqs]
+
+ # This is the result of the function:
+ first_window_fft = cache['FFT_slices'][0][0]
+
+ npt.assert_equal(w_ft, first_window_fft)
+
+ coh_cached = tsa.cache_to_coherency(cache, ij)[0, 1]
+ f, c = tsa.coherency(ts)
+ coh_direct = c[0, 1]
+
+ npt.assert_almost_equal(coh_direct, coh_cached)
+
+ # Only welch PSD works and an error is thrown otherwise. This tests that
+ # the error is thrown:
+ npt.assert_raises(ValueError, tsa.cache_fft, ts, ij, method=methods[2])
+
+ # Take the method in which the window is defined on input:
+ freqs, cache1 = tsa.cache_fft(ts, ij, method=methods[3])
+ # And compare it to the method in which it isn't:
+ freqs, cache2 = tsa.cache_fft(ts, ij, method=methods[4])
+ npt.assert_equal(cache1, cache2)
+
+ # Do the same, while setting scale_by_freq to False:
+ freqs, cache1 = tsa.cache_fft(ts, ij, method=methods[3],
+ scale_by_freq=False)
+ freqs, cache2 = tsa.cache_fft(ts, ij, method=methods[4],
+ scale_by_freq=False)
+ npt.assert_equal(cache1, cache2)
+
+ # Test cache_to_psd:
+ psd1 = tsa.cache_to_psd(cache, ij)[0]
+ # Against the standard get_spectra:
+ f, c = tsa.get_spectra(ts)
+ psd2 = c[0][0]
+
+ npt.assert_almost_equal(psd1, psd2)
+
+ # Test that prefer_speed_over_memory doesn't change anything:
+ freqs, cache1 = tsa.cache_fft(ts, ij)
+ freqs, cache2 = tsa.cache_fft(ts, ij, prefer_speed_over_memory=True)
+ psd1 = tsa.cache_to_psd(cache1, ij)[0]
+ psd2 = tsa.cache_to_psd(cache2, ij)[0]
+ npt.assert_almost_equal(psd1, psd2)
+
+
+# XXX This is not testing anything substantial for now - I am not sure what to
+# test here...
+def test_cache_to_phase():
+ """
+ Test phase calculations from cached windowed FFT
+
+ """
+ ij = [(0, 1), (1, 0)]
+ x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])
+ y = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + np.random.rand(t.shape[-1])
+ ts = np.vstack([x, y])
+ freqs, cache = tsa.cache_fft(ts, ij)
+ ph = tsa.cache_to_phase(cache, ij)
+
+
+def test_cache_to_coherency():
+ """
+
+ Test cache_to_coherency against the standard coherency calculation
+
+ """
+ ij = [(0, 1), (1, 0)]
+ ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))
+ freqs, cache = tsa.cache_fft(ts, ij)
+ Cxy = tsa.cache_to_coherency(cache, ij)
+ f, c = tsa.coherency(ts)
+ npt.assert_almost_equal(Cxy[0][1], c[0, 1])
+
+ # Check that it doesn't matter if you prefer_speed_over_memory:
+ freqs, cache2 = tsa.cache_fft(ts, ij, prefer_speed_over_memory=True)
+ Cxy2 = tsa.cache_to_coherency(cache2, ij)
+
+ npt.assert_equal(Cxy2, Cxy)
+
+ # XXX Calculating the angle of the averaged psd and calculating the average
+ # of the angles calculated over different windows does not yield exactly
+ # the same number, because the angle is not a linear functions (arctan),
+ # so it is unclear how to test this, but we make sure that it runs,
+ # whether or not you prefer_speed_over_memory:
+ freqs, cache = tsa.cache_fft(ts, ij)
+ tsa.cache_to_relative_phase(cache, ij)
+
+ freqs, cache = tsa.cache_fft(ts, ij, prefer_speed_over_memory=True)
+ tsa.cache_to_relative_phase(cache, ij)
+
+ # Check that things run alright, even if there is just one window for the
+ # entire ts:
+ freqs, cache = tsa.cache_fft(ts, ij, method=dict(this_method='welch',
+ NFFT=ts.shape[-1],
+ n_overlap=0))
+
+ cxy_one_window = tsa.cache_to_coherency(cache, ij)
+ ph_one_window = tsa.cache_to_relative_phase(cache, ij)
+
+ # And whether or not you prefer_speed_over_memory
+ freqs, cache = tsa.cache_fft(ts, ij, method=dict(this_method='welch',
+ NFFT=ts.shape[-1],
+ n_overlap=0),
+ prefer_speed_over_memory=True)
+
+ cxy_one_window = tsa.cache_to_coherency(cache, ij)
+ ph_one_window = tsa.cache_to_relative_phase(cache, ij)
diff --git a/nitime/algorithms/tests/test_correlation.py b/nitime/algorithms/tests/test_correlation.py
new file mode 100644
index 0000000..be856d2
--- /dev/null
+++ b/nitime/algorithms/tests/test_correlation.py
@@ -0,0 +1,14 @@
+import numpy as np
+import numpy.testing as npt
+
+import nitime.algorithms as tsa
+
+def test_seed_correlation():
+
+ seed = np.random.rand(10)
+ targ = np.random.rand(10, 10)
+
+ our_coef_array = tsa.seed_corrcoef(seed, targ)
+ np_coef_array = np.array([np.corrcoef(seed, a)[0, 1] for a in targ])
+
+ npt.assert_array_almost_equal(our_coef_array, np_coef_array)
diff --git a/nitime/algorithms/tests/test_event_related.py b/nitime/algorithms/tests/test_event_related.py
new file mode 100644
index 0000000..c69fcf2
--- /dev/null
+++ b/nitime/algorithms/tests/test_event_related.py
@@ -0,0 +1,28 @@
+import numpy as np
+import numpy.testing as npt
+import nitime
+import nitime.algorithms as tsa
+
+
+def test_xcorr_zscored():
+ """
+
+ Test this function, which is not otherwise tested in the testing of the
+ EventRelatedAnalyzer
+
+ """
+
+ cycles = 10
+ l = 1024
+ unit = 2 * np.pi / l
+ t = np.arange(0, 2 * np.pi + unit, unit)
+ signal = np.sin(cycles * t)
+ events = np.zeros(t.shape)
+ #Zero crossings:
+ idx = np.where(np.abs(signal) < 0.03)[0]
+ #An event occurs at the beginning of every cycle:
+ events[idx[:-2:2]] = 1
+
+ a = tsa.freq_domain_xcorr_zscored(signal, events, 1000, 1000)
+ npt.assert_almost_equal(np.mean(a), 0, 1)
+ npt.assert_almost_equal(np.std(a), 1, 1)
diff --git a/nitime/algorithms/tests/test_spectral.py b/nitime/algorithms/tests/test_spectral.py
new file mode 100644
index 0000000..ad22390
--- /dev/null
+++ b/nitime/algorithms/tests/test_spectral.py
@@ -0,0 +1,479 @@
+"""
+Tests for the algorithms.spectral submodule
+
+"""
+
+import numpy as np
+import scipy
+from scipy import fftpack
+import numpy.testing as npt
+import numpy.testing.decorators as dec
+import nose.tools as nt
+
+import nitime.algorithms as tsa
+import nitime.utils as utils
+
+
+def test_get_spectra():
+ """
+
+ Testing spectral estimation
+
+ """
+
+ methods = (None,
+ {"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
+ {"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
+
+ for method in methods:
+ avg_pwr1 = []
+ avg_pwr2 = []
+ est_pwr1 = []
+ est_pwr2 = []
+ arsig1, _, _ = utils.ar_generator(N=2 ** 16) # needs to be that long
+ # for the answers to converge
+ arsig2, _, _ = utils.ar_generator(N=2 ** 16)
+
+ avg_pwr1.append((arsig1 ** 2).mean())
+ avg_pwr2.append((arsig2 ** 2).mean())
+
+ tseries = np.vstack([arsig1, arsig2])
+
+ f, c = tsa.get_spectra(tseries, method=method)
+
+ # \sum_{\omega} psd d\omega:
+ est_pwr1.append(np.sum(c[0, 0]) * (f[1] - f[0]))
+ est_pwr2.append(np.sum(c[1, 1]) * (f[1] - f[0]))
+
+ # Get it right within the order of magnitude:
+ npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
+ npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
+
+
+def test_get_spectra_complex():
+ """
+
+ Testing spectral estimation
+
+ """
+
+ methods = (None,
+ {"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
+ {"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
+
+ for method in methods:
+ avg_pwr1 = []
+ avg_pwr2 = []
+ est_pwr1 = []
+ est_pwr2 = []
+
+ # Make complex signals:
+ r, _, _ = utils.ar_generator(N=2 ** 16) # It needs to be that long for
+ # the answers to converge
+ c, _, _ = utils.ar_generator(N=2 ** 16)
+ arsig1 = r + c * scipy.sqrt(-1)
+
+ r, _, _ = utils.ar_generator(N=2 ** 16)
+ c, _, _ = utils.ar_generator(N=2 ** 16)
+
+ arsig2 = r + c * scipy.sqrt(-1)
+ avg_pwr1.append((arsig1 * arsig1.conjugate()).mean())
+ avg_pwr2.append((arsig2 * arsig2.conjugate()).mean())
+
+ tseries = np.vstack([arsig1, arsig2])
+
+ f, c = tsa.get_spectra(tseries, method=method)
+
+ # \sum_{\omega} psd d\omega:
+ est_pwr1.append(np.sum(c[0, 0]) * (f[1] - f[0]))
+ est_pwr2.append(np.sum(c[1, 1]) * (f[1] - f[0]))
+
+ # Get it right within the order of magnitude:
+ npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
+ npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
+
+
+def test_get_spectra_unknown_method():
+ """
+    Test that providing an unknown method to get_spectra raises a ValueError
+
+ """
+ tseries = np.array([[1, 2, 3], [4, 5, 6]])
+ npt.assert_raises(ValueError,
+ tsa.get_spectra, tseries, method=dict(this_method='foo'))
+
+
+def test_periodogram():
+ """Test some of the inputs to periodogram """
+
+ arsig, _, _ = utils.ar_generator(N=1024)
+ Sk = fftpack.fft(arsig)
+
+ f1, c1 = tsa.periodogram(arsig)
+ f2, c2 = tsa.periodogram(arsig, Sk=Sk)
+
+ npt.assert_equal(c1, c2)
+
+ # Check that providing a complex signal does the right thing
+ # (i.e. two-sided spectrum):
+ N = 1024
+ r, _, _ = utils.ar_generator(N=N)
+ c, _, _ = utils.ar_generator(N=N)
+ arsig = r + c * scipy.sqrt(-1)
+
+ f, c = tsa.periodogram(arsig)
+ npt.assert_equal(f.shape[0], N) # Should be N, not the one-sided N/2 + 1
+
+
+def test_periodogram_csd():
+ """Test corner cases of periodogram_csd"""
+
+ arsig1, _, _ = utils.ar_generator(N=1024)
+ arsig2, _, _ = utils.ar_generator(N=1024)
+
+ tseries = np.vstack([arsig1, arsig2])
+
+ Sk = fftpack.fft(tseries)
+
+ f1, c1 = tsa.periodogram_csd(tseries)
+ f2, c2 = tsa.periodogram_csd(tseries, Sk=Sk)
+ npt.assert_equal(c1, c2)
+
+ # Check that providing a complex signal does the right thing
+ # (i.e. two-sided spectrum):
+ N = 1024
+ r, _, _ = utils.ar_generator(N=N)
+ c, _, _ = utils.ar_generator(N=N)
+ arsig1 = r + c * scipy.sqrt(-1)
+
+ r, _, _ = utils.ar_generator(N=N)
+ c, _, _ = utils.ar_generator(N=N)
+ arsig2 = r + c * scipy.sqrt(-1)
+
+ tseries = np.vstack([arsig1, arsig2])
+
+ f, c = tsa.periodogram_csd(tseries)
+ npt.assert_equal(f.shape[0], N) # Should be N, not the one-sided N/2 + 1
+
+
+def test_dpss_windows():
+ """ Test a funky corner case of DPSS_windows """
+
+ N = 1024
+ NW = 0 # Setting NW to 0 triggers the weird corner case in which some of
+ # the symmetric tapers have a negative average
+ Kmax = 7
+
+ # But that's corrected by the algorithm:
+ d, w = tsa.dpss_windows(1024, 0, 7)
+ for this_d in d[0::2]:
+ npt.assert_equal(this_d.sum(axis=-1) < 0, False)
+
+def test_dpss_properties():
+ """ Test conventions of Slepian eigenvectors """
+
+ N = 2000
+ NW = 200
+ d, lam = tsa.dpss_windows(N, NW, 2*NW-2)
+    # all 2NW-2 lambdas should be > 0.9
+ nt.assert_true(
+ (lam > 0.9).all(), 'Eigenvectors show poor spectral concentration'
+ )
+    # test orthonormality
+ err = np.linalg.norm(d.dot(d.T) - np.eye(2*NW-2), ord='fro')
+ nt.assert_true(err**2 < 1e-16, 'Eigenvectors not numerically orthonormal')
+ # test positivity of even functions
+ nt.assert_true(
+ (d[::2].sum(axis=1) > 0).all(),
+ 'Even Slepian sequences should have positive DC'
+ )
+ # test positive initial slope of odd functions
+ # (this tests the sign of a linear slope)
+ pk = np.argmax(np.abs(d[1::2, :N/2]), axis=1)
+ t = True
+ for p, f in zip(pk, d[1::2]):
+ t = t and np.sum( np.arange(1,p+1) * f[:p] ) >= 0
+ nt.assert_true(t, 'Odd Slepians should begin positive-going')
+
+def test_get_spectra_bi():
+ """
+
+ Test the bi-variate get_spectra function
+
+ """
+
+ methods = (None,
+ {"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
+ {"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
+
+ for method in methods:
+ arsig1, _, _ = utils.ar_generator(N=2 ** 16)
+ arsig2, _, _ = utils.ar_generator(N=2 ** 16)
+
+ avg_pwr1 = (arsig1 ** 2).mean()
+ avg_pwr2 = (arsig2 ** 2).mean()
+ avg_xpwr = (arsig1 * arsig2.conjugate()).mean()
+
+ tseries = np.vstack([arsig1, arsig2])
+
+ f, fxx, fyy, fxy = tsa.get_spectra_bi(arsig1, arsig2, method=method)
+
+ # \sum_{\omega} PSD(\omega) d\omega:
+ est_pwr1 = np.sum(fxx * (f[1] - f[0]))
+ est_pwr2 = np.sum(fyy * (f[1] - f[0]))
+ est_xpwr = np.sum(fxy * (f[1] - f[0])).real
+
+ # Test that we have the right order of magnitude:
+ npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
+ npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
+ npt.assert_array_almost_equal(np.mean(est_xpwr),
+ np.mean(avg_xpwr),
+ decimal=-1)
+
+
+def test_mtm_lin_combo():
+ "Test the functionality of cross and autospectrum MTM combinations"
+ spec1 = np.random.randn(5, 100) + 1j * np.random.randn(5, 100)
+ spec2 = np.random.randn(5, 100) + 1j * np.random.randn(5, 100)
+ # test on both broadcasted weights and per-point weights
+ for wshape in ((2, 5, 1), (2, 5, 100)):
+ weights = np.random.randn(*wshape)
+ sides = 'onesided'
+ mtm_cross = tsa.mtm_cross_spectrum(
+ spec1, spec2, (weights[0], weights[1]), sides=sides
+ )
+ nt.assert_true(mtm_cross.dtype in np.sctypes['complex'],
+ 'Wrong dtype for crossspectrum')
+ nt.assert_true(len(mtm_cross) == 51,
+ 'Wrong length for halfband spectrum')
+ sides = 'twosided'
+ mtm_cross = tsa.mtm_cross_spectrum(
+ spec1, spec2, (weights[0], weights[1]), sides=sides
+ )
+ nt.assert_true(len(mtm_cross) == 100,
+ 'Wrong length for fullband spectrum')
+ sides = 'onesided'
+ mtm_auto = tsa.mtm_cross_spectrum(
+ spec1, spec1, weights[0], sides=sides
+ )
+ nt.assert_true(mtm_auto.dtype in np.sctypes['float'],
+ 'Wrong dtype for autospectrum')
+ nt.assert_true(len(mtm_auto) == 51,
+ 'Wrong length for halfband spectrum')
+ sides = 'twosided'
+ mtm_auto = tsa.mtm_cross_spectrum(
+ spec1, spec2, weights[0], sides=sides
+ )
+ nt.assert_true(len(mtm_auto) == 100,
+ 'Wrong length for fullband spectrum')
+
+
+def test_mtm_cross_spectrum():
+ """
+
+ Test the multi-taper cross-spectral estimation. Based on the example in
+ doc/examples/multi_taper_coh.py
+
+ """
+ NW = 4
+ K = 2 * NW - 1
+
+ N = 2 ** 10
+ n_reps = 10
+ n_freqs = N
+
+ tapers, eigs = tsa.dpss_windows(N, NW, 2 * NW - 1)
+
+ est_psd = []
+ for k in range(n_reps):
+ data, nz, alpha = utils.ar_generator(N=N)
+ fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)
+ # 'one-sided', so multiply by 2:
+ psd = 2 * (hz * hz.conj()).real
+
+ tdata = tapers * data
+
+ tspectra = fftpack.fft(tdata)
+
+ L = N / 2 + 1
+ sides = 'onesided'
+ w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)
+
+ sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)
+ est_psd.append(sxx)
+
+ fxx = np.mean(est_psd, 0)
+
+ psd_ratio = np.mean(fxx / psd)
+
+ # This is a rather lenient test, making sure that the average ratio is 1 to
+ # within an order of magnitude. That is, that they are equal on average:
+ npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)
+
+ # Test raising of error in case the inputs don't make sense:
+ npt.assert_raises(ValueError,
+ tsa.mtm_cross_spectrum,
+ tspectra, np.r_[tspectra, tspectra],
+ (w, w))
+
+
+@dec.slow
+def test_multi_taper_psd_csd():
+ """
+
+ Test the multi taper psd and csd estimation functions.
+ Based on the example in
+ doc/examples/multi_taper_spectral_estimation.py
+
+ """
+
+ N = 2 ** 10
+ n_reps = 10
+
+ psd = []
+ est_psd = []
+ est_csd = []
+ for jk in [True, False]:
+ for k in range(n_reps):
+ for adaptive in [True, False]:
+ ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)
+ ar_seq -= ar_seq.mean()
+ fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha],
+ n_freqs=N)
+ psd.append(2 * (hz * hz.conj()).real)
+ f, psd_mt, nu = tsa.multi_taper_psd(ar_seq, adaptive=adaptive,
+ jackknife=jk)
+ est_psd.append(psd_mt)
+ f, csd_mt = tsa.multi_taper_csd(np.vstack([ar_seq, ar_seq]),
+ adaptive=adaptive)
+ # Symmetrical in this case, so take one element out:
+ est_csd.append(csd_mt[0][1])
+
+ fxx = np.mean(psd, axis=0)
+ fxx_est1 = np.mean(est_psd, axis=0)
+ fxx_est2 = np.mean(est_csd, axis=0)
+
+ # Tests the psd:
+ psd_ratio1 = np.mean(fxx_est1 / fxx)
+ npt.assert_array_almost_equal(psd_ratio1, 1, decimal=-1)
+ # Tests the csd:
+ psd_ratio2 = np.mean(fxx_est2 / fxx)
+ npt.assert_array_almost_equal(psd_ratio2, 1, decimal=-1)
+
+
+def test_gh57():
+ """
+ https://github.com/nipy/nitime/issues/57
+ """
+ data = np.random.randn(10, 1000)
+ for jk in [True, False]:
+ for adaptive in [True, False]:
+ f, psd, sigma = tsa.multi_taper_psd(data, adaptive=adaptive,
+ jackknife=jk)
+
+
+def test_hermitian_periodogram_csd():
+ """
+ Make sure CSD matrices returned by various methods have
+ Hermitian symmetry.
+ """
+
+ sig = np.random.randn(4,256)
+
+ _, csd1 = tsa.periodogram_csd(sig)
+
+ for i in range(4):
+ for j in range(i+1):
+ xc1 = csd1[i,j]
+ xc2 = csd1[j,i]
+ npt.assert_equal(
+ xc1, xc2.conj(), err_msg='Periodogram CSD not Hermitian'
+ )
+
+ _, psd = tsa.periodogram(sig)
+ for i in range(4):
+ npt.assert_almost_equal(
+ psd[i], csd1[i,i].real,
+            err_msg='Periodogram CSD diagonal inconsistent with real PSD'
+ )
+
+def test_hermitian_multitaper_csd():
+ """
+ Make sure CSD matrices returned by various methods have
+ Hermitian symmetry.
+ """
+
+ sig = np.random.randn(4,256)
+
+ _, csd1 = tsa.multi_taper_csd(sig, adaptive=False)
+
+ for i in range(4):
+ for j in range(i+1):
+ xc1 = csd1[i,j]
+ xc2 = csd1[j,i]
+ npt.assert_equal(
+ xc1, xc2.conj(), err_msg='MTM CSD not Hermitian'
+ )
+
+ _, psd, _ = tsa.multi_taper_psd(sig, adaptive=False)
+ for i in range(4):
+ npt.assert_almost_equal(
+ psd[i], csd1[i,i].real,
+ err_msg='MTM CSD diagonal inconsistent with real PSD'
+ )
+
+def test_periodogram_spectral_normalization():
+ """
+ Check that the spectral estimators are normalized in the
+ correct Watts/Hz fashion
+ """
+
+ x = np.random.randn(1024)
+ f1, Xp1 = tsa.periodogram(x)
+ f2, Xp2 = tsa.periodogram(x, Fs=100)
+ f3, Xp3 = tsa.periodogram(x, N=2**12)
+
+ p1 = np.sum(Xp1) * 2 * np.pi / 2**10
+ p2 = np.sum(Xp2) * 100 / 2**10
+ p3 = np.sum(Xp3) * 2 * np.pi / 2**12
+ nt.assert_true( np.abs(p1 - p2) < 1e-14,
+ 'Inconsistent frequency normalization in periodogram (1)' )
+ nt.assert_true( np.abs(p3 - p2) < 1e-8,
+ 'Inconsistent frequency normalization in periodogram (2)' )
+
+ td_var = np.var(x)
+ # assure that the estimators are at least in the same
+ # order of magnitude as the time-domain variance
+ nt.assert_true( np.abs(np.log10(p1/td_var)) < 1,
+ 'Incorrect frequency normalization in periodogram' )
+
+ # check the freq vector while we're here
+ nt.assert_true( f2.max() == 50, 'Periodogram returns wrong frequency bins' )
+
+def test_multitaper_spectral_normalization():
+ """
+ Check that the spectral estimators are normalized in the
+ correct Watts/Hz fashion
+ """
+
+ x = np.random.randn(1024)
+ f1, Xp1, _ = tsa.multi_taper_psd(x)
+ f2, Xp2, _ = tsa.multi_taper_psd(x, Fs=100)
+ f3, Xp3, _ = tsa.multi_taper_psd(x, NFFT=2**12)
+
+ p1 = np.sum(Xp1) * 2 * np.pi / 2**10
+ p2 = np.sum(Xp2) * 100 / 2**10
+ p3 = np.sum(Xp3) * 2 * np.pi / 2**12
+ nt.assert_true( np.abs(p1 - p2) < 1e-14,
+ 'Inconsistent frequency normalization in MTM PSD (1)' )
+ nt.assert_true( np.abs(p3 - p2) < 1e-8,
+ 'Inconsistent frequency normalization in MTM PSD (2)' )
+
+ td_var = np.var(x)
+ # assure that the estimators are at least in the same
+ # order of magnitude as the time-domain variance
+ nt.assert_true( np.abs(np.log10(p1/td_var)) < 1,
+ 'Incorrect frequency normalization in MTM PSD' )
+
+ # check the freq vector while we're here
+ nt.assert_true( f2.max() == 50, 'MTM PSD returns wrong frequency bins' )
diff --git a/nitime/algorithms/wavelet.py b/nitime/algorithms/wavelet.py
new file mode 100644
index 0000000..1e43989
--- /dev/null
+++ b/nitime/algorithms/wavelet.py
@@ -0,0 +1,107 @@
+"""
+
+Wavelets
+
+"""
+
+import numpy as np
+from nitime.lazy import scipy_fftpack as fftpack
+
+
+def wfmorlet_fft(f0, sd, sampling_rate, ns=5, nt=None):
+ """
+ returns a complex morlet wavelet in the frequency domain
+
+ Parameters
+ ----------
+ f0 : center frequency
+ sd : standard deviation of center frequency
+    sampling_rate : sampling rate
+ ns : window length in number of standard deviations
+ nt : window length in number of sample points
+ """
+    if nt is None:
+ st = 1. / (2. * np.pi * sd)
+ nt = 2 * int(ns * st * sampling_rate) + 1
+ f = fftpack.fftfreq(nt, 1. / sampling_rate)
+ wf = 2 * np.exp(-(f - f0) ** 2 / (2 * sd ** 2)) * np.sqrt(sampling_rate /
+ (np.sqrt(np.pi) * sd))
+ wf[f < 0] = 0
+ wf[f == 0] /= 2
+ return wf
+
+
+def wmorlet(f0, sd, sampling_rate, ns=5, normed='area'):
+ """
+ returns a complex morlet wavelet in the time domain
+
+ Parameters
+ ----------
+ f0 : center frequency
+ sd : standard deviation of frequency
+    sampling_rate : sampling rate
+ ns : window length in number of standard deviations
+ """
+ st = 1. / (2. * np.pi * sd)
+ w_sz = float(int(ns * st * sampling_rate)) # half time window size
+ t = np.arange(-w_sz, w_sz + 1, dtype=float) / sampling_rate
+ if normed == 'area':
+ w = np.exp(-t ** 2 / (2. * st ** 2)) * np.exp(
+ 2j * np.pi * f0 * t) / np.sqrt(np.sqrt(np.pi) * st * sampling_rate)
+ elif normed == 'max':
+ w = np.exp(-t ** 2 / (2. * st ** 2)) * np.exp(
+ 2j * np.pi * f0 * t) * 2 * sd * np.sqrt(2 * np.pi) / sampling_rate
+ else:
+ assert 0, 'unknown norm %s' % normed
+ return w
+
+
+def wlogmorlet_fft(f0, sd, sampling_rate, ns=5, nt=None):
+ """
+ returns a complex log morlet wavelet in the frequency domain
+
+ Parameters
+ ----------
+ f0 : center frequency
+ sd : standard deviation
+    sampling_rate : sampling rate
+ ns : window length in number of standard deviations
+ nt : window length in number of sample points
+ """
+    if nt is None:
+ st = 1. / (2. * np.pi * sd)
+ nt = 2 * int(ns * st * sampling_rate) + 1
+ f = fftpack.fftfreq(nt, 1. / sampling_rate)
+
+ sfl = np.log(1 + 1. * sd / f0)
+ wf = (2 * np.exp(-(np.log(f) - np.log(f0)) ** 2 / (2 * sfl ** 2)) *
+ np.sqrt(sampling_rate / (np.sqrt(np.pi) * sd)))
+ wf[f < 0] = 0
+ wf[f == 0] /= 2
+ return wf
+
+
+def wlogmorlet(f0, sd, sampling_rate, ns=5, normed='area'):
+ """
+ returns a complex log morlet wavelet in the time domain
+
+ Parameters
+ ----------
+ f0 : center frequency
+ sd : standard deviation of frequency
+    sampling_rate : sampling rate
+ ns : window length in number of standard deviations
+ """
+ st = 1. / (2. * np.pi * sd)
+ w_sz = int(ns * st * sampling_rate) # half time window size
+ wf = wlogmorlet_fft(f0, sd, sampling_rate=sampling_rate, nt=2 * w_sz + 1)
+ w = fftpack.fftshift(fftpack.ifft(wf))
+ if normed == 'area':
+ w /= w.real.sum()
+ elif normed == 'max':
+ w /= w.real.max()
+ elif normed == 'energy':
+ w /= np.sqrt((w ** 2).sum())
+ else:
+ assert 0, 'unknown norm %s' % normed
+ return w
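+
+
+# Illustrative sketch (the _example_* helper and its parameters are
+# hypothetical, not part of the nitime API; uses this module's numpy import):
+# convolving the time-domain Morlet wavelet with a signal gives a complex,
+# band-limited representation around f0, whose magnitude tracks the envelope
+# of that frequency component (up to the wavelet's normalization).
+def _example_morlet_envelope():
+    sampling_rate = 1000.0                        # Hz, hypothetical
+    t = np.arange(0, 1.0, 1.0 / sampling_rate)
+    signal = np.sin(2 * np.pi * 40.0 * t)         # a 40 Hz tone
+    w = wmorlet(f0=40.0, sd=5.0, sampling_rate=sampling_rate)
+    analytic = np.convolve(signal, w, mode='same')
+    return np.abs(analytic)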
diff --git a/nitime/analysis/__init__.py b/nitime/analysis/__init__.py
new file mode 100644
index 0000000..ba21901
--- /dev/null
+++ b/nitime/analysis/__init__.py
@@ -0,0 +1,33 @@
+"""
+Nitime analysis
+---------------
+
+This module implements an analysis interface between time-series
+objects implemented in the :mod:`timeseries` module and the algorithms provided
+in the :mod:`algorithms` library and other algorithms.
+
+The general pattern of use of Analyzer objects is that an object is
+initialized with a TimeSeries object as input. Depending on the analysis
+methods implemented in the particular analysis object, additional inputs may
+also be required.
+
+The methods of the object are then implemented as instances of
+:obj:`OneTimeProperty`, which means that they are only calculated when they are
+needed and then cached for further use.
+
+Analyzer objects are generally implemented by inheriting from
+:class:`desc.ResetMixin`, which means that they have a :meth:`reset`
+method. This method resets the object to its initialized state, in which none
+of the :obj:`OneTimeProperty` methods have been calculated. This makes it
+possible to change the parameter settings of the object and recalculate the
+quantities in these methods with the new parameter settings.
+
+"""
+
+from nitime.analysis.coherence import *
+from nitime.analysis.correlation import *
+from nitime.analysis.event_related import *
+from nitime.analysis.normalization import *
+from nitime.analysis.snr import *
+from nitime.analysis.spectral import *
+from nitime.analysis.granger import *
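+
+
+# A minimal sketch of the pattern described in the module docstring
+# (illustrative only, not used by the library): the analyzer is initialized
+# with a TimeSeries, the lazily-computed attributes are evaluated on first
+# access and cached, and reset() clears them so that they can be recomputed
+# after a parameter change.
+def _example_analyzer_pattern():
+    import numpy as np
+    import nitime.timeseries as ts
+
+    t = ts.TimeSeries(np.random.randn(2, 256), sampling_rate=1.)
+    C = CoherenceAnalyzer(t)
+    coh = C.coherence   # computed on first access, then cached
+    C.reset()           # clears all cached OneTimeProperty values
+    return coh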
diff --git a/nitime/analysis/base.py b/nitime/analysis/base.py
new file mode 100644
index 0000000..2541c3e
--- /dev/null
+++ b/nitime/analysis/base.py
@@ -0,0 +1,43 @@
+
+from inspect import getargspec
+
+from nitime import descriptors as desc
+
+
+class BaseAnalyzer(desc.ResetMixin):
+ """
+ Analyzer that implements the default data flow.
+
+ All analyzers that inherit from this class have to, at the very least:
+ * implement an __init__ function to set parameters
+ * define the 'output' property
+ A minimal sketch of such a subclass follows the class definition below.
+
+ """
+
+ @desc.setattr_on_read
+ def parameterlist(self):
+ plist = getargspec(self.__init__).args
+ plist.remove('self')
+ plist.remove('input')
+ return plist
+
+ @property
+ def parameters(self):
+ return dict([(p,
+ getattr(self, p, 'MISSING')) for p in self.parameterlist])
+
+ def __init__(self, input=None):
+ self.input = input
+
+ def set_input(self, input):
+ """Set the input of the analyzer, if you want to reuse the analyzer
+ with a different input than the original """
+
+ self.reset()
+ self.input = input
+
+ def __repr__(self):
+ params = ', '.join(['%s=%r' % (p, getattr(self, p, 'MISSING'))
+ for p in self.parameterlist])
+
+ return '%s(%s)' % (self.__class__.__name__, params)
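+
+
+# A minimal sketch (illustrative only, not used by the library) of what a
+# subclass is expected to look like, following the class docstring above:
+# __init__ sets the parameters, and the interesting quantities are exposed
+# as lazily computed attributes via desc.setattr_on_read.
+def _example_subclass():
+    import numpy as np
+
+    class MeanAnalyzer(BaseAnalyzer):
+        """A toy analyzer whose single output is the mean of its input."""
+        def __init__(self, input=None):
+            BaseAnalyzer.__init__(self, input)
+
+        @desc.setattr_on_read
+        def output(self):
+            return np.mean(self.input)
+
+    m = MeanAnalyzer(np.arange(10))
+    return m.output  # computed once, cached until m.reset() is called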
diff --git a/nitime/analysis/coherence.py b/nitime/analysis/coherence.py
new file mode 100644
index 0000000..d66115e
--- /dev/null
+++ b/nitime/analysis/coherence.py
@@ -0,0 +1,730 @@
+import warnings
+
+import numpy as np
+from nitime.lazy import scipy_stats_distributions as dist
+from nitime.lazy import scipy_fftpack as fftpack
+
+from nitime import descriptors as desc
+from nitime import utils as tsu
+from nitime import algorithms as tsa
+
+# To support older versions of numpy that don't have tril_indices:
+from nitime.index_utils import tril_indices, triu_indices
+
+from .base import BaseAnalyzer
+
+
+class CoherenceAnalyzer(BaseAnalyzer):
+ """Analyzer object for coherence/coherency analysis """
+
+ def __init__(self, input=None, method=None, unwrap_phases=False):
+ """
+
+ Parameters
+ ----------
+
+ input : TimeSeries object
+ Containing the data to analyze.
+
+ method : dict, optional,
+ This is the method used for spectral analysis of the signal for the
+ coherence calculation. See :func:`algorithms.get_spectra`
+ documentation for details.
+
+ unwrap_phases : bool, optional
+ Whether to unwrap the phases. This should be True if you assume that
+ the time-delay is the same for all the frequency bands. See
+ [Sun2005]_ for details. Default : False
+
+ Examples
+ --------
+ >>> import nitime.timeseries as ts
+ >>> np.set_printoptions(precision=4) # for doctesting
+ >>> t1 = ts.TimeSeries(data = np.arange(0,1024,1).reshape(2,512),
+ ... sampling_rate=np.pi)
+ >>> c1 = CoherenceAnalyzer(t1)
+ >>> c1.method['Fs'] # doctest: +ELLIPSIS
+ 3.1415926535... Hz
+ >>> c1.method['this_method']
+ 'welch'
+ >>> c1.coherence[0,1]
+ array([ 0.9024, 0.9027, 0.9652, 0.9433, 0.9297, 0.9213, 0.9161,
+ 0.9126, 0.9102, 0.9085, 0.9072, 0.9063, 0.9055, 0.905 ,
+ 0.9045, 0.9041, 0.9038, 0.9036, 0.9034, 0.9032, 0.9031,
+ 0.9029, 0.9028, 0.9027, 0.9027, 0.9026, 0.9026, 0.9025,
+ 0.9025, 0.9025, 0.9025, 0.9026, 1. ])
+ >>> c1.phase[0,1]
+ array([ 0. , -0.035 , -0.4839, -0.4073, -0.3373, -0.2828, -0.241 ,
+ -0.2085, -0.1826, -0.1615, -0.144 , -0.1292, -0.1164, -0.1054,
+ -0.0956, -0.0869, -0.0791, -0.072 , -0.0656, -0.0596, -0.0541,
+ -0.0489, -0.0441, -0.0396, -0.0353, -0.0314, -0.0277, -0.0244,
+ -0.0216, -0.0197, -0.0198, -0.028 , 0. ])
+
+ """
+ BaseAnalyzer.__init__(self, input)
+
+ # Set the variables for spectral estimation (can also be entered by
+ # user):
+ if method is None:
+ self.method = {'this_method': 'welch',
+ 'Fs': self.input.sampling_rate}
+ else:
+ self.method = method
+
+ # If an input is provided, get the sampling rate from there; if you
+ # want to over-ride that, pass a method dict with an 'Fs' field specified:
+ self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)
+
+ self._unwrap_phases = unwrap_phases
+
+ # The following only applies to the welch method:
+ if (self.method.get('this_method') == 'welch' or
+ self.method.get('this_method') is None):
+
+ # If the input is shorter than NFFT, all the coherences will be
+ # 1 by definition. Throw a warning about that:
+ self.method['NFFT'] = self.method.get('NFFT', tsa.default_nfft)
+ self.method['n_overlap'] = self.method.get('n_overlap',
+ tsa.default_n_overlap)
+ if (self.input.shape[-1] <
+ (self.method['NFFT'] + self.method['n_overlap'])):
+ e_s = "In nitime.analysis, the provided input time-series is"
+ e_s += " shorter than the requested NFFT + n_overlap. All "
+ e_s += "coherence values will be set to 1."
+ warnings.warn(e_s, RuntimeWarning)
+
+ @desc.setattr_on_read
+ def coherency(self):
+ """The standard output for this kind of analyzer is the coherency """
+ data = self.input.data
+ tseries_length = data.shape[0]
+ spectrum_length = self.spectrum.shape[-1]
+
+ coherency = np.zeros((tseries_length,
+ tseries_length,
+ spectrum_length), dtype=complex)
+
+ for i in range(tseries_length):
+ for j in range(i, tseries_length):
+ coherency[i][j] = tsa.coherency_spec(self.spectrum[i][j],
+ self.spectrum[i][i],
+ self.spectrum[j][j])
+
+ idx = tril_indices(tseries_length, -1)
+ coherency[idx[0], idx[1], ...] = coherency[idx[1], idx[0], ...].conj()
+
+ return coherency
+
+ @desc.setattr_on_read
+ def spectrum(self):
+ """
+ The spectra of each of the channels and cross-spectra between
+ different channels in the input TimeSeries object
+ """
+ f, spectrum = tsa.get_spectra(self.input.data, method=self.method)
+ return spectrum
+
+ @desc.setattr_on_read
+ def frequencies(self):
+ """
+ The central frequencies in the bands
+ """
+
+ #XXX Use NFFT in the method in order to calculate these, without having
+ #to calculate the spectrum:
+ f, spectrum = tsa.get_spectra(self.input.data, method=self.method)
+ return f
+
+ @desc.setattr_on_read
+ def coherence(self):
+ """
+ The coherence between the different channels in the input TimeSeries
+ object
+ """
+
+ #XXX Calculate this from the standard output, instead of recalculating
+ #the coherence:
+
+ tseries_length = self.input.data.shape[0]
+ spectrum_length = self.spectrum.shape[-1]
+ coherence = np.zeros((tseries_length,
+ tseries_length,
+ spectrum_length))
+
+ for i in range(tseries_length):
+ for j in range(i, tseries_length):
+ coherence[i][j] = tsa.coherence_spec(self.spectrum[i][j],
+ self.spectrum[i][i],
+ self.spectrum[j][j])
+
+ idx = tril_indices(tseries_length, -1)
+ coherence[idx[0], idx[1], ...] = coherence[idx[1], idx[0], ...].conj()
+
+ return coherence
+
+ @desc.setattr_on_read
+ def phase(self):
+ """ The frequency-dependent phase relationship between all the pairwise
+ combinations of time-series in the data"""
+
+ #XXX calculate this from the standard output, instead of recalculating:
+
+ tseries_length = self.input.data.shape[0]
+ spectrum_length = self.spectrum.shape[-1]
+
+ phase = np.zeros((tseries_length,
+ tseries_length,
+ spectrum_length))
+
+ for i in range(tseries_length):
+ for j in range(i, tseries_length):
+ phase[i][j] = np.angle(
+ self.spectrum[i][j])
+
+ phase[j][i] = np.angle(
+ self.spectrum[i][j].conjugate())
+ return phase
+
+ @desc.setattr_on_read
+ def delay(self):
+ """ The delay in seconds between the two time series """
+ p_shape = self.phase.shape[:-1]
+ delay = np.zeros(self.phase.shape)
+ for i in range(p_shape[0]):
+ for j in range(p_shape[1]):
+ this_phase = self.phase[i, j]
+ #If requested, unwrap the phases:
+ if self._unwrap_phases:
+ this_phase = tsu.unwrap_phases(this_phase)
+
+ delay[i, j] = this_phase / (2 * np.pi * self.frequencies)
+
+ return delay
+
+ @desc.setattr_on_read
+ def coherence_partial(self):
+ """The partial coherence between data[i] and data[j], given data[k], as
+ a function of frequency band"""
+
+ tseries_length = self.input.data.shape[0]
+ spectrum_length = self.spectrum.shape[-1]
+
+ p_coherence = np.zeros((tseries_length,
+ tseries_length,
+ tseries_length,
+ spectrum_length))
+
+ for i in range(tseries_length):
+ for j in range(tseries_length):
+ for k in range(tseries_length):
+ if j == k or i == k:
+ pass
+ else:
+ p_coherence[i][j][k] = tsa.coherence_partial_spec(
+ self.spectrum[i][j],
+ self.spectrum[i][i],
+ self.spectrum[j][j],
+ self.spectrum[i][k],
+ self.spectrum[j][k],
+ self.spectrum[k][k])
+
+ idx = tril_indices(tseries_length, -1)
+ p_coherence[idx[0], idx[1], ...] =\
+ p_coherence[idx[1], idx[0], ...].conj()
+
+ return p_coherence
+
+
+class MTCoherenceAnalyzer(BaseAnalyzer):
+ """ Analyzer for multi-taper coherence analysis, including jack-knife
+ estimate of confidence interval """
+ def __init__(self, input=None, bandwidth=None, alpha=0.05, adaptive=True):
+
+ """
+ Initializer function for the MTCoherenceAnalyzer
+
+ Parameters
+ ----------
+
+ input : TimeSeries object
+
+ bandwidth : float,
+ The bandwidth of the windowing function will determine the number of
+ tapers to use. This parameter represents a trade-off between
+ frequency resolution (lower main lobe bandwidth for the taper) and
+ variance reduction (higher bandwidth and number of averaged
+ estimates). By default it will be set to 4 times the fundamental
+ frequency, such that NW=4
+
+ alpha : float, default =0.05
+ This is the alpha used to construct a confidence interval around
+ the multi-taper csd estimate, based on a jack-knife estimate of the
+ variance [Thompson2007]_.
+
+ adaptive : bool, default to True
+ Whether to set the weights for the tapered spectra according to the
+ adaptive algorithm (Thompson, 2007).
+
+ Notes
+ -----
+
+ Thompson, DJ (2007) Jackknifing multitaper spectrum estimates. IEEE
+ Signal Processing Magazine. 24: 20-30
+
+ """
+
+ BaseAnalyzer.__init__(self, input)
+
+ if input is None:
+ self.NW = 4
+ self.bandwidth = None
+ else:
+ N = input.shape[-1]
+ Fs = self.input.sampling_rate
+ if bandwidth is not None:
+ self.NW = bandwidth / (2 * Fs) * N
+ else:
+ self.NW = 4
+ self.bandwidth = self.NW * (2 * Fs) / N
+
+ self.alpha = alpha
+ self._L = self.input.data.shape[-1] / 2 + 1
+ self._adaptive = adaptive
+
+ @desc.setattr_on_read
+ def tapers(self):
+ return tsa.dpss_windows(self.input.shape[-1], self.NW,
+ 2 * self.NW - 1)[0]
+
+ @desc.setattr_on_read
+ def eigs(self):
+ return tsa.dpss_windows(self.input.shape[-1], self.NW,
+ 2 * self.NW - 1)[1]
+
+ @desc.setattr_on_read
+ def df(self):
+ # The degrees of freedom:
+ return 2 * self.NW - 1
+
+ @desc.setattr_on_read
+ def spectra(self):
+ tdata = self.tapers[None, :, :] * self.input.data[:, None, :]
+ tspectra = fftpack.fft(tdata)
+ return tspectra
+
+ @desc.setattr_on_read
+ def weights(self):
+ channel_n = self.input.data.shape[0]
+ w = np.empty((channel_n, self.df, self._L))
+
+ if self._adaptive:
+ for i in range(channel_n):
+ # this is always a one-sided spectrum?
+ w[i] = tsu.adaptive_weights(self.spectra[i],
+ self.eigs,
+ sides='onesided')[0]
+
+ # Set the weights to be the square root of the eigen-values:
+ else:
+ wshape = [1] * len(self.spectra.shape)
+ wshape[0] = channel_n
+ wshape[-2] = int(self.df)
+ pre_w = np.sqrt(self.eigs) + np.zeros((wshape[0],
+ self.eigs.shape[0]))
+
+ w = pre_w.reshape(*wshape)
+
+ return w
+
+ @desc.setattr_on_read
+ def coherence(self):
+ nrows = self.input.data.shape[0]
+ psd_mat = np.zeros((2, nrows, nrows, self._L), 'd')
+ coh_mat = np.zeros((nrows, nrows, self._L), 'd')
+
+ for i in range(self.input.data.shape[0]):
+ for j in range(i):
+ sxy = tsa.mtm_cross_spectrum(self.spectra[i], self.spectra[j],
+ (self.weights[i], self.weights[j]),
+ sides='onesided')
+ sxx = tsa.mtm_cross_spectrum(self.spectra[i], self.spectra[i],
+ self.weights[i],
+ sides='onesided')
+ syy = tsa.mtm_cross_spectrum(self.spectra[j], self.spectra[j],
+ self.weights[i],
+ sides='onesided')
+ psd_mat[0, i, j] = sxx
+ psd_mat[1, i, j] = syy
+ coh_mat[i, j] = np.abs(sxy) ** 2
+ coh_mat[i, j] /= (sxx * syy)
+
+ idx = triu_indices(self.input.data.shape[0], 1)
+ coh_mat[idx[0], idx[1], ...] = coh_mat[idx[1], idx[0], ...].conj()
+
+ return coh_mat
+
+ @desc.setattr_on_read
+ def confidence_interval(self):
+ """The size of the 1-alpha confidence interval"""
+ coh_var = np.zeros((self.input.data.shape[0],
+ self.input.data.shape[0],
+ self._L), 'd')
+ for i in range(self.input.data.shape[0]):
+ for j in range(i):
+ if i != j:
+ coh_var[i, j] = tsu.jackknifed_coh_variance(
+ self.spectra[i],
+ self.spectra[j],
+ self.eigs,
+ adaptive=self._adaptive
+ )
+
+ idx = triu_indices(self.input.data.shape[0], 1)
+ coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()
+
+ coh_mat_xform = tsu.normalize_coherence(self.coherence,
+ 2 * self.df - 2)
+
+ lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,
+ self.df - 1) * np.sqrt(coh_var)
+ ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,
+ self.df - 1) * np.sqrt(coh_var)
+
+ # convert this measure with the normalizing function
+ tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)
+ tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)
+
+ return ub - lb
+
+ @desc.setattr_on_read
+ def frequencies(self):
+ return np.linspace(0, self.input.sampling_rate / 2, self._L)
+
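+# A short usage sketch (illustrative only, not used by the library): for a
+# 2-channel time-series with N = 512 points sampled at Fs = 1 Hz, the default
+# NW = 4 corresponds (per the constructor above) to a bandwidth of
+# NW * (2 * Fs) / N = 8 / 512 = 0.015625 Hz, and 2 * NW - 1 = 7 tapers are
+# used for the coherence and its jack-knifed confidence interval.
+def _example_mt_coherence_usage():
+    import numpy as np
+    import nitime.timeseries as ts
+
+    t = ts.TimeSeries(np.random.randn(2, 512), sampling_rate=1.)
+    C = MTCoherenceAnalyzer(t)   # NW defaults to 4
+    return C.coherence, C.confidence_interval
+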
+
+class SparseCoherenceAnalyzer(BaseAnalyzer):
+ """
+ This analyzer is intended for analysis of large sets of data, in which
+ possibly only a subset of combinations of time-series needs to be compared.
+ The constructor for this class receives as input not only a time-series
+ object, but also a list of tuples with index combinations (i,j) for the
+ combinations. Importantly, this class implements only the mlab csd function
+ and cannot use other methods of spectral estimation
+ """
+
+ def __init__(self, time_series=None, ij=(0, 0), method=None, lb=0, ub=None,
+ prefer_speed_over_memory=True, scale_by_freq=True):
+ """The constructor for the SparseCoherenceAnalyzer
+
+ Parameters
+ ----------
+
+ time_series : a time-series object
+
+ ij : a list of tuples, each containing a pair of indices.
+ The resulting cache will contain the fft of time-series in the rows
+ indexed by the unique elements of the union of i and j
+
+ lb,ub : float,optional, default: lb=0, ub=None (max frequency)
+
+ define a frequency band of interest
+
+ prefer_speed_over_memory: Boolean, optional, default=True
+
+ If True, the conjugate FFT windows are cached as well, which makes the
+ computation faster at the cost of additional memory use.
+
+ method : optional, dict
+ The method for spectral estimation (see
+ :func:`algorithms.get_spectra`)
+
+ """
+
+ BaseAnalyzer.__init__(self, time_series)
+ #Initialize variables from the time series
+ self.ij = ij
+
+ #Set the variables for spectral estimation (can also be entered by
+ #user):
+ if method is None:
+ self.method = {'this_method': 'welch'}
+
+ else:
+ self.method = method
+
+ if self.method['this_method'] != 'welch':
+ e_s = "For SparseCoherenceAnalyzer, "
+ e_s += "spectral estimation method must be welch"
+ raise ValueError(e_s)
+
+ self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)
+
+ #Additional parameters for the coherency estimation:
+ self.lb = lb
+ self.ub = ub
+ self.prefer_speed_over_memory = prefer_speed_over_memory
+ self.scale_by_freq = scale_by_freq
+
+ @desc.setattr_on_read
+ def coherency(self):
+ """ The default behavior is to calculate the cache, extract it and then
+ output the coherency"""
+ coherency = tsa.cache_to_coherency(self.cache, self.ij)
+
+ return coherency
+
+ @desc.setattr_on_read
+ def coherence(self):
+ """ The coherence values for the output"""
+ coherence = np.abs(self.coherency ** 2)
+
+ return coherence
+
+ @desc.setattr_on_read
+ def cache(self):
+ """Caches the fft windows required by the other methods of the
+ SparseCoherenceAnalyzer. Calculate only once and reuse
+ """
+ data = self.input.data
+ f, cache = tsa.cache_fft(data,
+ self.ij,
+ lb=self.lb,
+ ub=self.ub,
+ method=self.method,
+ prefer_speed_over_memory=self.prefer_speed_over_memory,
+ scale_by_freq=self.scale_by_freq)
+
+ return cache
+
+ @desc.setattr_on_read
+ def spectrum(self):
+ """get the spectrum for the collection of time-series in this analyzer
+ """
+ spectrum = tsa.cache_to_psd(self.cache, self.ij)
+
+ return spectrum
+
+ @desc.setattr_on_read
+ def phases(self):
+ """The frequency-band dependent phases of the spectra of each of the
+ time-series i,j in the analyzer"""
+
+ phase = tsa.cache_to_phase(self.cache, self.ij)
+
+ return phase
+
+ @desc.setattr_on_read
+ def relative_phases(self):
+ """The frequency-band dependent relative phase between the two
+ time-series """
+ return np.angle(self.coherency)
+
+ @desc.setattr_on_read
+ def delay(self):
+ """ The delay in seconds between the two time series """
+ return self.relative_phases / (2 * np.pi * self.frequencies)
+
+ @desc.setattr_on_read
+ def frequencies(self):
+ """Get the central frequencies for the frequency bands, given the
+ method of estimating the spectrum """
+
+ self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)
+ NFFT = self.method.get('NFFT', 64)
+ Fs = self.method.get('Fs')
+ freqs = tsu.get_freqs(Fs, NFFT)
+ lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)
+
+ return freqs[lb_idx:ub_idx]
+
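+# A usage sketch (illustrative only, not used by the library): with many
+# channels, only the requested (i, j) combinations are computed. Here only
+# the pairs (0, 1) and (0, 2) of a 10-channel time-series are analyzed.
+def _example_sparse_coherence_usage():
+    import numpy as np
+    import nitime.timeseries as ts
+
+    t = ts.TimeSeries(np.random.randn(10, 1024), sampling_rate=2.)
+    S = SparseCoherenceAnalyzer(t, ij=[(0, 1), (0, 2)])
+    # Coherence is computed only for the requested pairs:
+    return S.coherence, S.frequencies
+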
+
+class SeedCoherenceAnalyzer(object):
+ """
+ This analyzer takes two time-series. The first is designated as a
+ time-series of seeds. The other is designated as a time-series of targets.
+ The analyzer performs a coherence analysis between each of the channels in
+ the seed time-series and *all* of the channels in the target time-series.
+
+ Note
+ ----
+
+ This is a convenience class, which provides a convenient-to-use interface
+ to the SparseCoherenceAnalyzer
+
+ """
+
+ def __init__(self, seed_time_series=None, target_time_series=None,
+ method=None, lb=0, ub=None, prefer_speed_over_memory=True,
+ scale_by_freq=True):
+
+ """
+
+ The constructor for the SeedCoherenceAnalyzer
+
+ Parameters
+ ----------
+
+ seed_time_series: a time-series object
+
+ target_time_series: a time-series object
+
+ lb,ub: float,optional, default: lb=0, ub=None (max frequency)
+
+ define a frequency band of interest
+
+ prefer_speed_over_memory: Boolean, optional, default=True
+
+ Makes things go a bit faster, if you have enough memory
+
+
+ """
+
+ self.seed = seed_time_series
+ self.target = target_time_series
+
+ # Check that the seed and the target have the same sampling rate:
+ if self.seed.sampling_rate != self.target.sampling_rate:
+ e_s = "The sampling rate for the seed time-series and the target"
+ e_s += " time-series need to be identical."
+ raise ValueError(e_s)
+
+ #Set the variables for spectral estimation (can also be entered by
+ #user):
+ if method is None:
+ self.method = {'this_method': 'welch'}
+
+ else:
+ self.method = method
+
+ if ('this_method' in self.method.keys() and
+ self.method['this_method'] != 'welch'):
+ e_s = "For SeedCoherenceAnalyzer, "
+ e_s += "spectral estimation method must be welch"
+ raise ValueError(e_s)
+
+ #Additional parameters for the coherency estimation:
+ self.lb = lb
+ self.ub = ub
+ self.prefer_speed_over_memory = prefer_speed_over_memory
+ self.scale_by_freq = scale_by_freq
+
+ @desc.setattr_on_read
+ def coherence(self):
+ """
+ The coherence between each of the channels of the seed time series and
+ all the channels of the target time-series.
+
+ """
+ return np.abs(self.coherency) ** 2
+
+ @desc.setattr_on_read
+ def frequencies(self):
+ """Get the central frequencies for the frequency bands, given the
+ method of estimating the spectrum """
+
+ # Get the sampling rate from the seed time-series:
+ self.method['Fs'] = self.method.get('Fs', self.seed.sampling_rate)
+ NFFT = self.method.get('NFFT', 64)
+ Fs = self.method.get('Fs')
+ freqs = tsu.get_freqs(Fs, NFFT)
+ lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)
+
+ return freqs[lb_idx:ub_idx]
+
+ @desc.setattr_on_read
+ def target_cache(self):
+ data = self.target.data
+
+ #Make a cache with all the fft windows for each of the channels in the
+ #target.
+
+ #This is the kind of input that cache_fft expects:
+ ij = list(zip(np.arange(data.shape[0]), np.arange(data.shape[0])))
+
+ f, cache = tsa.cache_fft(data, ij, lb=self.lb, ub=self.ub,
+ method=self.method,
+ prefer_speed_over_memory=self.prefer_speed_over_memory,
+ scale_by_freq=self.scale_by_freq)
+
+ return cache
+
+ @desc.setattr_on_read
+ def coherency(self):
+
+ #Pre-allocate the final result:
+ if len(self.seed.shape) > 1:
+ Cxy = np.empty((self.seed.data.shape[0],
+ self.target.data.shape[0],
+ self.frequencies.shape[0]), dtype=np.complex)
+ else:
+ Cxy = np.empty((self.target.data.shape[0],
+ self.frequencies.shape[0]), dtype=np.complex)
+
+ #Get the fft window cache for the target time-series:
+ cache = self.target_cache
+
+ #A list of indices for the target:
+ target_chan_idx = np.arange(self.target.data.shape[0])
+
+ #This is a list of indices into the cached fft window libraries,
+ #setting the index of the seed to be -1, so that it is easily
+ #distinguished from the target indices:
+ ij = list(zip(np.ones_like(target_chan_idx) * -1, target_chan_idx))
+
+ #If there is more than one channel in the seed time-series:
+ if len(self.seed.shape) > 1:
+ for seed_idx, this_seed in enumerate(self.seed.data):
+ #Here ij is 0, because it is just one channel and we stack the
+ #channel onto itself in order for the input to the function to
+ #make sense:
+ f, seed_cache = tsa.cache_fft(
+ np.vstack([this_seed, this_seed]),
+ [(0, 0)],
+ lb=self.lb,
+ ub=self.ub,
+ method=self.method,
+ prefer_speed_over_memory=self.prefer_speed_over_memory,
+ scale_by_freq=self.scale_by_freq)
+
+ #Insert the seed_cache into the target_cache:
+ cache['FFT_slices'][-1] = seed_cache['FFT_slices'][0]
+
+ #If this is true, the cache contains both FFT_slices and
+ #FFT_conj_slices:
+ if self.prefer_speed_over_memory:
+ cache['FFT_conj_slices'][-1] = \
+ seed_cache['FFT_conj_slices'][0]
+
+ #This performs the calculation for this seed:
+ Cxy[seed_idx] = tsa.cache_to_coherency(cache, ij)
+
+ #In the case where there is only one channel in the seed time-series:
+ else:
+ f, seed_cache = tsa.cache_fft(
+ np.vstack([self.seed.data,
+ self.seed.data]),
+ [(0, 0)],
+ lb=self.lb,
+ ub=self.ub,
+ method=self.method,
+ prefer_speed_over_memory=self.prefer_speed_over_memory,
+ scale_by_freq=self.scale_by_freq)
+
+ cache['FFT_slices'][-1] = seed_cache['FFT_slices'][0]
+
+ if self.prefer_speed_over_memory:
+ cache['FFT_conj_slices'][-1] = \
+ seed_cache['FFT_conj_slices'][0]
+
+ Cxy = tsa.cache_to_coherency(cache, ij)
+
+ return Cxy.squeeze()
+
+ @desc.setattr_on_read
+ def relative_phases(self):
+ """The frequency-band dependent relative phase between the two
+ time-series """
+ return np.angle(self.coherency)
+
+ @desc.setattr_on_read
+ def delay(self):
+ """ The delay in seconds between the two time series """
+ return self.relative_phases / (2 * np.pi * self.frequencies)
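+
+
+# A usage sketch (illustrative only, not used by the library): coherence
+# between a single seed channel and every channel of a separate target
+# time-series, both sampled at the same rate (a requirement enforced in the
+# constructor above).
+def _example_seed_coherence_usage():
+    import numpy as np
+    import nitime.timeseries as ts
+
+    seed = ts.TimeSeries(np.random.randn(1024), sampling_rate=2.)
+    target = ts.TimeSeries(np.random.randn(5, 1024), sampling_rate=2.)
+    S = SeedCoherenceAnalyzer(seed, target)
+    # One coherence spectrum per target channel:
+    return S.coherence, S.frequencies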
diff --git a/nitime/analysis/correlation.py b/nitime/analysis/correlation.py
new file mode 100644
index 0000000..a0763b8
--- /dev/null
+++ b/nitime/analysis/correlation.py
@@ -0,0 +1,160 @@
+import numpy as np
+
+from nitime import descriptors as desc
+from nitime import timeseries as ts
+from nitime import algorithms as tsa
+
+# To support older versions of numpy that don't have tril_indices:
+from nitime.index_utils import tril_indices
+
+from .base import BaseAnalyzer
+
+
+class CorrelationAnalyzer(BaseAnalyzer):
+ """Analyzer object for correlation analysis. Has the same API as the
+ CoherenceAnalyzer"""
+
+ def __init__(self, input=None):
+ """
+ Parameters
+ ----------
+
+ input : TimeSeries object
+ Containing the data to analyze.
+
+ Examples
+ --------
+ >>> np.set_printoptions(precision=4) # for doctesting
+ >>> t1 = ts.TimeSeries(data = np.sin(np.arange(0,
+ ... 10*np.pi,10*np.pi/100)).reshape(2,50),
+ ... sampling_rate=np.pi)
+ >>> c1 = CorrelationAnalyzer(t1)
+ >>> c1 = CorrelationAnalyzer(t1)
+ >>> c1.corrcoef
+ array([[ 1., -1.],
+ [-1., 1.]])
+ >>> c1.xcorr.sampling_rate # doctest: +ELLIPSIS
+ 3.141592653... Hz
+ >>> c1.xcorr.t0 # doctest: +ELLIPSIS
+ -15.91549430915... s
+
+ """
+
+ BaseAnalyzer.__init__(self, input)
+
+ @desc.setattr_on_read
+ def corrcoef(self):
+ """The correlation coefficient between every pairwise combination of
+ time-series contained in the object"""
+ return np.corrcoef(self.input.data)
+
+ @desc.setattr_on_read
+ def xcorr(self):
+ """The cross-correlation between every pairwise combination time-series
+ in the object. Uses np.correlation('full').
+
+ Returns
+ -------
+
+ TimeSeries : the time-dependent cross-correlation, with zero-lag
+ at time=0
+
+ """
+ tseries_length = self.input.data.shape[0]
+ t_points = self.input.data.shape[-1]
+ xcorr = np.zeros((tseries_length,
+ tseries_length,
+ t_points * 2 - 1))
+ data = self.input.data
+ for i in range(tseries_length):
+ data_i = data[i]
+ for j in range(i, tseries_length):
+ xcorr[i, j] = np.correlate(data_i,
+ data[j],
+ mode='full')
+
+ idx = tril_indices(tseries_length, -1)
+ xcorr[idx[0], idx[1], ...] = xcorr[idx[1], idx[0], ...]
+
+ return ts.TimeSeries(xcorr,
+ sampling_interval=self.input.sampling_interval,
+ t0=-self.input.sampling_interval * t_points)
+
+ @desc.setattr_on_read
+ def xcorr_norm(self):
+ """The cross-correlation between every pairwise combination time-series
+ in the object, where the zero lag correlation is normalized to be equal
+ to the correlation coefficient between the time-series
+
+ Returns
+ -------
+
+ TimeSeries : A TimeSeries object
+ the time-dependent cross-correlation, with zero-lag at time=0
+
+ """
+
+ tseries_length = self.input.data.shape[0]
+ t_points = self.input.data.shape[-1]
+ xcorr = np.zeros((tseries_length,
+ tseries_length,
+ t_points * 2 - 1))
+ data = self.input.data
+ for i in range(tseries_length):
+ data_i = data[i]
+ for j in range(i, tseries_length):
+ xcorr[i, j] = np.correlate(data_i,
+ data[j],
+ mode='full')
+ xcorr[i, j] /= (xcorr[i, j, t_points])
+ xcorr[i, j] *= self.corrcoef[i, j]
+
+ idx = tril_indices(tseries_length, -1)
+ xcorr[idx[0], idx[1], ...] = xcorr[idx[1], idx[0], ...]
+
+ return ts.TimeSeries(xcorr,
+ sampling_interval=self.input.sampling_interval,
+ t0=-self.input.sampling_interval * t_points)
+
+
+class SeedCorrelationAnalyzer(object):
+ """
+ This analyzer takes two time-series. The first is designated as a
+ time-series of seeds. The other is designated as a time-series of targets.
+ The analyzer performs a correlation analysis between each of the channels
+ in the seed time-series and *all* of the channels in the target
+ time-series.
+
+ """
+ def __init__(self, seed_time_series=None, target_time_series=None):
+ """
+ Parameters
+ ----------
+
+ seed_time_series : a TimeSeries object
+
+ target_time_series : a TimeSeries object
+
+ """
+ self.seed = seed_time_series
+ self.target = target_time_series
+
+ @desc.setattr_on_read
+ def corrcoef(self):
+
+ #If there is more than one channel in the seed time-series:
+ if len(self.seed.shape) > 1:
+
+ # Preallocate results
+ Cxy = np.empty((self.seed.data.shape[0],
+ self.target.data.shape[0]), dtype=np.float)
+
+ for seed_idx, this_seed in enumerate(self.seed.data):
+
+ Cxy[seed_idx] = tsa.seed_corrcoef(this_seed, self.target.data)
+
+ #In the case where there is only one channel in the seed time-series:
+ else:
+ Cxy = tsa.seed_corrcoef(self.seed.data, self.target.data)
+
+ return Cxy.squeeze()
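+
+
+# A usage sketch (illustrative only, not used by the library): the
+# correlation coefficient between each of two seed channels and every channel
+# of a four-channel target.
+def _example_seed_correlation_usage():
+    seed = ts.TimeSeries(np.random.randn(2, 250), sampling_rate=1.)
+    target = ts.TimeSeries(np.random.randn(4, 250), sampling_rate=1.)
+    S = SeedCorrelationAnalyzer(seed, target)
+    return S.corrcoef   # shape (2, 4): one row per seed, one column per target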
diff --git a/nitime/analysis/event_related.py b/nitime/analysis/event_related.py
new file mode 100644
index 0000000..ad6a324
--- /dev/null
+++ b/nitime/analysis/event_related.py
@@ -0,0 +1,388 @@
+import numpy as np
+from nitime.lazy import scipy_stats as stats
+
+from nitime import descriptors as desc
+from nitime import utils as tsu
+from nitime import algorithms as tsa
+from nitime import timeseries as ts
+
+
+class EventRelatedAnalyzer(desc.ResetMixin):
+ """Analyzer object for reverse-correlation/event-related analysis.
+
+ Note: right now, this class assumes the input time series is only
+ two-dimensional. If your input data is something like
+ (nchannels,nsubjects, ...) with more dimensions, things are likely to break
+ in hard to understand ways.
+ """
+
+ def __init__(self, time_series, events, len_et, zscore=False,
+ correct_baseline=False, offset=0):
+ """
+ Parameters
+ ----------
+ time_series : a time-series object
+ A time-series with data on which the event-related analysis proceeds
+
+ events : a TimeSeries object or an Events object
+ The events which occurred in tandem with the time-series in the
+ EventRelatedAnalyzer. This object's data has to have the same
+ dimensions as the data in the EventRelatedAnalyzer object. In each
+ sample in the time-series, there is an integer, which denotes the
+ kind of event which occurred at that time. In time-bins in which no
+ event occurred, a 0 should be entered. The data in this time series
+ object needs to have the same dimensionality as the data in the
+ data time-series
+
+ len_et : int
+ The expected length of the event-triggered quantity (in the same
+ time-units as the events are represented in; presumably the number of
+ TRs, for fMRI data). For example, the size of the block dedicated in
+ the fir_matrix to each type of event
+
+ zscore : a flag to return the result in zscore (where relevant)
+
+ correct_baseline : a flag to correct the baseline according to the first
+ point in the event-triggered average (where possible)
+
+ offset : the offset of the beginning of the event-related time-series,
+ relative to the event occurrence
+ """
+ #XXX Change so that the offset and length of the eta can be given in
+ #units of time
+
+ #Make sure that the offset and the len_et values can be used, by
+ #padding with zeros before and after:
+
+ if isinstance(events, ts.TimeSeries):
+ #Set a flag to indicate the input is a time-series object:
+ self._is_ts = True
+ s = time_series.data.shape
+ e_data = np.copy(events.data)
+
+ #If the input is a one-dimensional (instead of an n-channel
+ #dimensional) time-series, we will need to broadcast to make the
+ #data assume the same number of dimensions as the time-series
+ #input:
+ if len(events.shape) == 1 and len(s) > 1:
+ e_data = e_data + np.zeros((s[0], 1))
+
+ zeros_before = np.zeros((s[:-1] + (abs(offset),)))
+ zeros_after = np.zeros((s[:-1] + (abs(len_et),)))
+ time_series_data = np.hstack([zeros_before,
+ time_series.data,
+ zeros_after])
+ events_data = np.hstack([zeros_before,
+ e_data,
+ zeros_after])
+
+ #If the events and the time_series have more than 1-d, the analysis
+ #can traverse their first dimension
+ if time_series.data.ndim - 1 > 0:
+ self._len_h = time_series.data.shape[0]
+ self.events = events_data
+ self.data = time_series_data
+ #Otherwise, in order to extract the array from the first dimension,
+ #we wrap it in a list
+
+ else:
+ self._len_h = 1
+ self.events = [events_data]
+ self.data = [time_series_data]
+
+ elif isinstance(events, ts.Events):
+ self._is_ts = False
+ s = time_series.data.shape
+ zeros_before = np.zeros((s[:-1] + (abs(offset),)))
+ zeros_after = np.zeros((s[:-1] + (abs(len_et),)))
+
+ #If the time_series has more than 1-d, the analysis can traverse
+ #the first dimension
+ if time_series.data.ndim - 1 > 0:
+ self._len_h = time_series.shape[0]
+ self.data = time_series
+ self.events = events
+
+ #Otherwise, in order to extract the array from the first dimension,
+ #we wrap it in a list
+ else:
+ self._len_h = 1
+ self.data = [time_series]
+ #No need to do that for the Events object:
+ self.events = events
+ else:
+ err = ("Input 'events' to EventRelatedAnalyzer must be of type "
+ "Events or of type TimeSeries, %r given" % events)
+ raise ValueError(err)
+
+ self.sampling_rate = time_series.sampling_rate
+ self.sampling_interval = time_series.sampling_interval
+ self.len_et = int(len_et)
+ self._zscore = zscore
+ self._correct_baseline = correct_baseline
+ self.offset = offset
+ self.time_unit = time_series.time_unit
+
+ @desc.setattr_on_read
+ def FIR(self):
+ """Calculate the FIR event-related estimated of the HRFs for different
+ kinds of events
+
+ Returns
+ -------
+ A time-series object, shape[:-2] are dimensions corresponding to the
+ shape[:-2] of the EventRelatedAnalyzer data, shape[-2] corresponds to
+ the different kinds of events used (ordered according to the sorted
+ order of the unique components in the events time-series). shape[-1]
+ corresponds to time, and has length = len_et
+
+ """
+ # XXX code needs to be changed to use flattening (see 'eta' below)
+
+ #Make a list to put the outputs in:
+ h = [0] * self._len_h
+
+ for i in range(self._len_h):
+ #XXX Check that the offset makes sense (there can't be an event
+ #happening within one offset duration of the beginning of the
+ #time-series:
+
+ #Get the design matrix (roll by the offset, in order to get the
+ #right thing):
+
+ roll_events = np.roll(self.events[i], self.offset)
+ design = tsu.fir_design_matrix(roll_events, self.len_et)
+ #Compute the fir estimate, in linear form:
+ this_h = tsa.fir(self.data[i], design)
+ #Reshape the linear fir estimate into a event_types*hrf_len array
+ u = np.unique(self.events[i])
+ event_types = u[np.unique(self.events[i]) != 0]
+ h[i] = np.reshape(this_h, (event_types.shape[0], self.len_et))
+
+ h = np.array(h).squeeze()
+
+ return ts.TimeSeries(data=h,
+ sampling_rate=self.sampling_rate,
+ t0=self.offset * self.sampling_interval,
+ time_unit=self.time_unit)
+
+ @desc.setattr_on_read
+ def FIR_estimate(self):
+ """Calculate back the LTI estimate of the time-series, from FIR"""
+ raise NotImplementedError
+
+ @desc.setattr_on_read
+ def xcorr_eta(self):
+ """Compute the normalized cross-correlation estimate of the HRFs for
+ different kinds of events
+
+ Returns
+ -------
+
+ A time-series object, shape[:-2] are dimensions corresponding to the
+ shape[:-2] of the EventRelatedAnalyzer data, shape[-2] corresponds to
+ the different kinds of events used (ordered according to the sorted
+ order of the unique components in the events time-series). shape[-1]
+ corresponds to time, and has length = len_et (xcorr looks both back
+ and forward for half of this length)
+
+ """
+ #Make a list to put the outputs in:
+ h = [0] * self._len_h
+
+ for i in range(self._len_h):
+ data = self.data[i]
+ u = np.unique(self.events[i])
+ event_types = u[np.unique(self.events[i]) != 0]
+ h[i] = np.empty((event_types.shape[0],
+ self.len_et / 2),
+ dtype=complex)
+ for e_idx in range(event_types.shape[0]):
+ this_e = (self.events[i] == event_types[e_idx]) * 1.0
+ if self._zscore:
+ this_h = tsa.freq_domain_xcorr_zscored(data,
+ this_e,
+ -self.offset + 1,
+ self.len_et - self.offset - 2)
+ else:
+ this_h = tsa.freq_domain_xcorr(data,
+ this_e,
+ -self.offset + 1,
+ self.len_et - self.offset - 2)
+ h[i][e_idx] = this_h
+
+ h = np.array(h).squeeze()
+
+ ## t0 for the object returned here needs to be the central time, not
+ ## the first time point, because the functions 'look' back and forth
+ ## for len_et bins
+
+ return ts.TimeSeries(data=h,
+ sampling_rate=self.sampling_rate,
+ t0=-1 * self.len_et * self.sampling_interval,
+ time_unit=self.time_unit)
+
+ @desc.setattr_on_read
+ def et_data(self):
+ """The event-triggered data (all occurences).
+
+ This gets the time-series corresponding to the inidividual event
+ occurences. Returns a list of lists of time-series. The first dimension
+ is the different channels in the original time-series data and the
+ second dimension is each type of event in the event time series
+
+ The time-series itself has the first diemnsion of the data being the
+ specific occurence, with time 0 locked to the that occurence
+ of the event and the last dimension is time.e
+
+ This complicated structure is so that it can deal with situations where
+ each channel has different events and different events have different #
+ of occurences
+ """
+ #Make a list for the output
+ h = [0] * self._len_h
+
+ for i in range(self._len_h):
+ data = self.data[i]
+ u = np.unique(self.events[i])
+ event_types = u[np.unique(self.events[i]) != 0]
+ #Make a list in here as well:
+ this_list = [0] * event_types.shape[0]
+ for e_idx in range(event_types.shape[0]):
+ idx = np.where(self.events[i] == event_types[e_idx])
+
+ idx_w_len = np.array([idx[0] + count + self.offset for count
+ in range(self.len_et)])
+ event_trig = data[idx_w_len].T
+ this_list[e_idx] = ts.TimeSeries(data=event_trig,
+ sampling_interval=self.sampling_interval,
+ t0=self.offset * self.sampling_interval,
+ time_unit=self.time_unit)
+
+ h[i] = this_list
+
+ return h
+
+ @desc.setattr_on_read
+ def eta(self):
+ """The event-triggered average activity.
+ """
+ #Make a list for the output
+ h = [0] * self._len_h
+
+ if self._is_ts:
+ # Loop over channels
+ for i in range(self._len_h):
+ data = self.data[i]
+ u = np.unique(self.events[i])
+ event_types = u[np.unique(self.events[i]) != 0]
+ h[i] = np.empty((event_types.shape[0], self.len_et),
+ dtype=complex)
+
+ # This offset is used to pull the event indices below, but we
+ # have to broadcast it so the shape of the resulting idx+offset
+ # operation below gives us the (nevents, len_et) array we want,
+ # per channel.
+ offset = np.arange(self.offset,
+ self.offset + self.len_et)[:, np.newaxis]
+ # Loop over event types
+ for e_idx in range(event_types.shape[0]):
+ idx = np.where(self.events[i] == event_types[e_idx])[0]
+ event_trig = data[idx + offset]
+ #Correct baseline by removing the first point in the series
+ #for each channel:
+ if self._correct_baseline:
+ event_trig -= event_trig[0]
+
+ h[i][e_idx] = np.mean(event_trig, -1)
+
+ #In case the input events are an Events:
+ else:
+ #Get the indices necessary for extraction of the eta:
+ add_offset = np.arange(self.offset,
+ self.offset + self.len_et)[:, np.newaxis]
+
+ idx = (self.events.time / self.sampling_interval).astype(int)
+
+ #Make a list for the output
+ h = [0] * self._len_h
+
+ # Loop over channels
+ for i in range(self._len_h):
+ #If this is a list with one element:
+ if self._len_h == 1:
+ event_trig = self.data[0][idx + add_offset]
+ #Otherwise, you need to index straight into the underlying data
+ #array:
+ else:
+ event_trig = self.data.data[i][idx + add_offset]
+
+ h[i] = np.mean(event_trig, -1)
+
+ h = np.array(h).squeeze()
+ return ts.TimeSeries(data=h,
+ sampling_interval=self.sampling_interval,
+ t0=self.offset * self.sampling_interval,
+ time_unit=self.time_unit)
+
+ @desc.setattr_on_read
+ def ets(self):
+ """The event-triggered standard error of the mean """
+
+ #Make a list for the output
+ h = [0] * self._len_h
+
+ if self._is_ts:
+ # Loop over channels
+ for i in range(self._len_h):
+ data = self.data[i]
+ u = np.unique(self.events[i])
+ event_types = u[np.unique(self.events[i]) != 0]
+ h[i] = np.empty((event_types.shape[0], self.len_et),
+ dtype=complex)
+
+ # This offset is used to pull the event indices below, but we
+ # have to broadcast it so the shape of the resulting idx+offset
+ # operation below gives us the (nevents, len_et) array we want,
+ # per channel.
+ offset = np.arange(self.offset,
+ self.offset + self.len_et)[:, np.newaxis]
+ # Loop over event types
+ for e_idx in range(event_types.shape[0]):
+ idx = np.where(self.events[i] == event_types[e_idx])[0]
+ event_trig = data[idx + offset]
+ #Correct baseline by removing the first point in the series
+ #for each channel:
+ if self._correct_baseline:
+ event_trig -= event_trig[0]
+
+ h[i][e_idx] = stats.sem(event_trig, -1)
+
+ #In case the input events are an Events:
+ else:
+ #Get the indices necessary for extraction of the eta:
+ add_offset = np.arange(self.offset,
+ self.offset + self.len_et)[:, np.newaxis]
+
+ idx = (self.events.time / self.sampling_interval).astype(int)
+
+ #Make a list for the output
+ h = [0] * self._len_h
+
+ # Loop over channels
+ for i in range(self._len_h):
+ #If this is a list with one element:
+ if self._len_h == 1:
+ event_trig = self.data[0][idx + add_offset]
+ #Otherwise, you need to index straight into the underlying data
+ #array:
+ else:
+ event_trig = self.data.data[i][idx + add_offset]
+
+ h[i] = stats.sem(event_trig, -1)
+
+ h = np.array(h).squeeze()
+ return ts.TimeSeries(data=h,
+ sampling_interval=self.sampling_interval,
+ t0=self.offset * self.sampling_interval,
+ time_unit=self.time_unit)
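+
+
+# A usage sketch (illustrative only, not used by the library): events are
+# marked in a TimeSeries of integer codes (0 = no event) that is aligned
+# sample-by-sample with the data, and the event-triggered average is read off
+# the `eta` attribute.
+def _example_event_related_usage():
+    data = ts.TimeSeries(np.random.randn(1, 1000), sampling_rate=1.)
+    codes = np.zeros((1, 1000))
+    codes[0, ::100] = 1     # an event of type "1" every 100 samples
+    events = ts.TimeSeries(codes, sampling_rate=1.)
+    E = EventRelatedAnalyzer(data, events, len_et=10)
+    return E.eta            # event-triggered average, len_et samples long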
diff --git a/nitime/analysis/granger.py b/nitime/analysis/granger.py
new file mode 100644
index 0000000..6e1efb2
--- /dev/null
+++ b/nitime/analysis/granger.py
@@ -0,0 +1,216 @@
+"""
+
+Analyzers for the calculation of Granger 'causality'
+
+"""
+
+import numpy as np
+import nitime.algorithms as alg
+import nitime.utils as utils
+from nitime import descriptors as desc
+
+from .base import BaseAnalyzer
+
+# To support older versions of numpy that don't have tril_indices:
+from nitime.index_utils import tril_indices_from
+
+def fit_model(x1, x2, order=None, max_order=10,
+ criterion=utils.bayesian_information_criterion):
+ """
+ Fit the auto-regressive model used in calculation of Granger 'causality'.
+
+ Parameters
+ ----------
+
+ x1,x2: float arrays (n)
+ The two 1-d time-series forming the bivariate combination.
+ order: int (optional)
+ If known, the order of the autoregressive process
+ max_order: int (optional)
+ If the order is not known, this will be the maximal order to fit.
+ criterion: callable
+ A function which defines an information criterion, used to determine the
+ order of the model.
+
+ """
+ c_old = np.inf
+ n_process = 2
+ Ntotal = n_process * x1.shape[-1]
+
+ # If model order was provided as an input:
+ if order is not None:
+ lag = order + 1
+ Rxx = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)
+ coef, ecov = alg.lwr_recursion(np.array(Rxx).transpose(2, 0, 1))
+
+ # If the model order is not known and not provided as input:
+ else:
+ for lag in range(1, max_order):
+ Rxx_new = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)
+ coef_new, ecov_new = alg.lwr_recursion(
+ np.array(Rxx_new).transpose(2, 0, 1))
+ order_new = coef_new.shape[0]
+ c_new = criterion(ecov_new, n_process, order_new, Ntotal)
+ if c_new > c_old:
+ # Keep the values you got in the last round and break out:
+ break
+
+ else:
+ # Replace the output values with the new calculated values and
+ # move on to the next order:
+ c_old = c_new
+ order = order_new
+ Rxx = Rxx_new
+ coef = coef_new
+ ecov = ecov_new
+ else:
+ e_s = ("Model estimation order did not converge at max_order = %s"
+ % max_order)
+ raise ValueError(e_s)
+
+ return order, Rxx, coef, ecov
+
+
+class GrangerAnalyzer(BaseAnalyzer):
+ """Analyzer for computing all-to-all Granger 'causality' """
+ def __init__(self, input=None, ij=None, order=None, max_order=10,
+ criterion=utils.bayesian_information_criterion, n_freqs=1024):
+ """
+ Initializer for the GrangerAnalyzer.
+
+ Parameters
+ ----------
+
+ input: nitime TimeSeries object
+ ij: List of tuples of the form: [(0, 1), (0, 2)], etc.
+ These are the indices of pairs of time-series for which the
+ analysis will be done. Defaults to all vs. all.
+ order: int (optional)
+ The order of the process. If this is not known, it will be
+ estimated from the data, using the information criterion
+ max_order: if the order is estimated, this is the maximal order to
+ estimate for.
+ n_freqs: int (optional)
+ The size of the sampling grid in the frequency domain.
+ Defaults to 1024
+ criterion: callable (optional)
+ The information criterion used to determine the model order (see
+ :func:`fit_model`).
+ """
+ self.data = input.data
+ self.sampling_rate = input.sampling_rate
+ self._n_process = input.shape[0]
+ self._n_freqs = n_freqs
+ self._order = order
+ self._criterion = criterion
+ self._max_order = max_order
+ if ij is None:
+ # The following gets the full list of combinations of
+ # non-same i's and j's:
+ x, y = np.meshgrid(np.arange(self._n_process),
+ np.arange(self._n_process))
+ self.ij = list(zip(x[tril_indices_from(x, -1)],
+ y[tril_indices_from(y, -1)]))
+ else:
+ self.ij = ij
+
+ @desc.setattr_on_read
+ def _model(self):
+ model = dict(order={}, autocov={}, model_coef={}, error_cov={})
+ for i, j in self.ij:
+ model[i, j] = dict()
+ order_t, Rxx_t, coef_t, ecov_t = fit_model(self.data[i],
+ self.data[j],
+ order=self._order,
+ max_order=self._max_order,
+ criterion=self._criterion)
+ model['order'][i, j] = order_t
+ model['autocov'][i, j] = Rxx_t
+ model['model_coef'][i, j] = coef_t
+ model['error_cov'][i, j] = ecov_t
+
+ return model
+
+ @desc.setattr_on_read
+ def order(self):
+ if self._order is None:
+ return self._model['order']
+ else:
+ order = {}
+ for i, j in self.ij:
+ order[i, j] = self._order
+ return order
+
+ @desc.setattr_on_read
+ def autocov(self):
+ return self._model['autocov']
+
+ @desc.setattr_on_read
+ def model_coef(self):
+ return self._model['model_coef']
+
+ @desc.setattr_on_read
+ def error_cov(self):
+ return self._model['error_cov']
+
+ @desc.setattr_on_read
+ def _granger_causality(self):
+ """
+ This returns a dict with the values computed by
+ :func:`granger_causality_xy`, rather than arrays, so that we can delay
+ the allocation of arrays as much as possible.
+
+ """
+ gc = dict(frequencies={}, gc_xy={}, gc_yx={}, gc_sim={},
+ spectral_density={})
+ for i, j in self.ij:
+ w, f_x2y, f_y2x, f_xy, Sw = \
+ alg.granger_causality_xy(self.model_coef[i, j],
+ self.error_cov[i, j],
+ n_freqs=self._n_freqs)
+
+ # All other measures are dependent on i, j:
+ gc['gc_xy'][i, j] = f_x2y
+ gc['gc_yx'][i, j] = f_y2x
+ gc['gc_sim'][i, j] = f_xy
+ gc['spectral_density'][i, j] = Sw
+
+ return gc
+
+ @desc.setattr_on_read
+ def frequencies(self):
+ return utils.get_freqs(self.sampling_rate, self._n_freqs)
+
+ def _dict2arr(self, key):
+ """
+ A helper function that will generate an array with all nan's and insert
+ the measure defined by 'key' into the array and return it to the
+ calling function. This allows us to get matrices of the measures of
+ interest, instead of a dict.
+ """
+ # Prepare the matrix for the output:
+ arr = np.empty((self._n_process,
+ self._n_process,
+ self.frequencies.shape[0]))
+
+ arr.fill(np.nan)
+
+ # 'Translate' from dict form into matrix form:
+ for i, j in self.ij:
+ arr[j, i, :] = self._granger_causality[key][i, j]
+ return arr
+
+ @desc.setattr_on_read
+ def causality_xy(self):
+ return self._dict2arr('gc_xy')
+
+ @desc.setattr_on_read
+ def causality_yx(self):
+ return self._dict2arr('gc_yx')
+
+ @desc.setattr_on_read
+ def simultaneous_causality(self):
+ return self._dict2arr('gc_sim')
+
+ @desc.setattr_on_read
+ def spectral_matrix(self):
+ return self._granger_causality['spectral_density']
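+
+
+# A usage sketch (illustrative only, not used by the library): all-to-all
+# Granger 'causality' between the channels of a TimeSeries, with the AR model
+# order fixed to 2. The causality attributes are
+# (n_channels, n_channels, n_frequencies) arrays, with NaNs for pairs that
+# were not computed.
+def _example_granger_usage():
+    import nitime.timeseries as ts
+
+    t = ts.TimeSeries(np.random.randn(3, 512), sampling_rate=1.)
+    G = GrangerAnalyzer(t, order=2)
+    return G.causality_xy, G.causality_yx, G.frequencies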
diff --git a/nitime/analysis/normalization.py b/nitime/analysis/normalization.py
new file mode 100644
index 0000000..d1df0d2
--- /dev/null
+++ b/nitime/analysis/normalization.py
@@ -0,0 +1,33 @@
+from nitime import descriptors as desc
+from nitime import utils as tsu
+from nitime import timeseries as ts
+
+from .base import BaseAnalyzer
+
+
+class NormalizationAnalyzer(BaseAnalyzer):
+ """ A class for performing normalization operations on time-series and
+ producing the renormalized versions of the time-series"""
+
+ def __init__(self, input=None):
+ """Constructor function for the Normalization analyzer class.
+
+ Parameters
+ ----------
+
+ input: TimeSeries object
+
+ """
+ BaseAnalyzer.__init__(self, input)
+
+ @desc.setattr_on_read
+ def percent_change(self):
+ return ts.TimeSeries(tsu.percent_change(self.input.data),
+ sampling_rate=self.input.sampling_rate,
+ time_unit=self.input.time_unit)
+
+ @desc.setattr_on_read
+ def z_score(self):
+ return ts.TimeSeries(tsu.zscore(self.input.data),
+ sampling_rate=self.input.sampling_rate,
+ time_unit=self.input.time_unit)
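+
+
+# A usage sketch (illustrative only, not used by the library): both outputs
+# are TimeSeries objects with the same sampling rate and time unit as the
+# input.
+def _example_normalization_usage():
+    import numpy as np
+
+    t = ts.TimeSeries(np.random.rand(2, 100) + 10, sampling_rate=1.)
+    N = NormalizationAnalyzer(t)
+    return N.percent_change, N.z_score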
diff --git a/nitime/analysis/snr.py b/nitime/analysis/snr.py
new file mode 100644
index 0000000..a05183f
--- /dev/null
+++ b/nitime/analysis/snr.py
@@ -0,0 +1,148 @@
+import numpy as np
+from nitime.lazy import scipy_stats as stats
+
+from nitime import descriptors as desc
+from nitime import algorithms as tsa
+from nitime import timeseries as ts
+
+from nitime.index_utils import tril_indices_from
+
+
+from .base import BaseAnalyzer
+
+
+def signal_noise(response):
+ """
+ Signal and noise as defined in Borst and Theunissen 1999, Figure 2
+
+ Parameters
+ ----------
+
+ response: nitime TimeSeries object
+ The data here are individual responses of a single unit to the same
+ stimulus, with repetitions being the first dimension and time as the
+ last dimension
+ """
+
+ signal = np.mean(response.data, 0) # The estimate of the signal is the
+ # average response
+
+ noise = response.data - signal # Noise is the individual
+ # repetition's deviation from the
+ # estimate of the signal
+
+ # Return TimeSeries objects with the sampling rate of the input:
+ return (ts.TimeSeries(signal, sampling_rate=response.sampling_rate),
+ ts.TimeSeries(noise, sampling_rate=response.sampling_rate))
+
+
+class SNRAnalyzer(BaseAnalyzer):
+ """
+ Calculate SNR for a response to repetitions of the same stimulus, according
+ to (Borst, 1999) (Figure 2) and (Hsu, 2004).
+
+ Hsu A, Borst A and Theunissen, FE (2004) Quantifying variability in neural
+ responses and its application for the validation of model
+ predictions. Network: Comput Neural Syst 15:91-109
+
+ Borst A and Theunissen FE (1999) Information theory and neural coding. Nat
+ Neurosci 2:947-957
+ """
+ def __init__(self, input=None, bandwidth=None, adaptive=False,
+ low_bias=False):
+ """
+ Initializer for the multi_taper_SNR object
+
+ Parameters
+ ----------
+ input: TimeSeries object
+
+ bandwidth: float,
+ The bandwidth of the windowing function will determine the number of
+ tapers to use. This parameter represents a trade-off between
+ frequency resolution (lower main lobe bandwidth for the taper) and
+ variance reduction (higher bandwidth and number of averaged
+ estimates). By default it will be set to 4 times the fundamental
+ frequency, such that NW=4
+
+ adaptive: bool, default to False
+ Whether to set the weights for the tapered spectra according to the
+ adaptive algorithm (Thompson, 2007).
+
+ low_bias : bool, default to False
+ Rather than use 2NW tapers, only use the tapers that have better
+ than 90% spectral concentration within the bandwidth (still using a
+ maximum of 2NW tapers)
+
+ Notes
+ -----
+
+ Thompson, DJ (2007) Jackknifing multitaper spectrum estimates. IEEE
+ Signal Processing Magazine. 24: 20-30
+
+ """
+ self.input = input
+ self.signal, self.noise = signal_noise(input)
+ self.bandwidth = bandwidth
+ self.adaptive = adaptive
+ self.low_bias = low_bias
+
+ @desc.setattr_on_read
+ def mt_frequencies(self):
+ return np.linspace(0, self.input.sampling_rate / 2,
+ self.input.data.shape[-1] / 2 + 1)
+
+ @desc.setattr_on_read
+ def mt_signal_psd(self):
+ _, p, _ = tsa.multi_taper_psd(self.signal.data,
+ Fs=self.input.sampling_rate,
+ BW=self.bandwidth,
+ adaptive=self.adaptive,
+ low_bias=self.low_bias)
+ return p
+
+ @desc.setattr_on_read
+ def mt_noise_psd(self):
+ p = np.empty((self.noise.data.shape[0],
+ self.noise.data.shape[-1] / 2 + 1))
+
+ for i in range(p.shape[0]):
+ _, p[i], _ = tsa.multi_taper_psd(self.noise.data[i],
+ Fs=self.input.sampling_rate,
+ BW=self.bandwidth,
+ adaptive=self.adaptive,
+ low_bias=self.low_bias)
+ return np.mean(p, 0)
+
+ @desc.setattr_on_read
+ def mt_coherence(self):
+ """ """
+ return self.mt_signal_psd / (self.mt_signal_psd + self.mt_noise_psd)
+
+ @desc.setattr_on_read
+ def mt_information(self):
+ df = self.mt_frequencies[1] - self.mt_frequencies[0]
+ return -1 * np.log2(1 - self.mt_coherence) * df
+ #These two formulations should be equivalent
+ #return np.log2(1+self.mt_snr)
+
+ @desc.setattr_on_read
+ def mt_snr(self):
+ return self.mt_signal_psd / self.mt_noise_psd
+
+ @desc.setattr_on_read
+ def correlation(self):
+ """
+ The correlation between all combinations of trials
+
+ Returns
+ -------
+ (r,e) : tuple
+ r is the mean correlation and e is the standard error of the mean
+ correlation (with df = n_trials - 1)
+ """
+
+ c = np.corrcoef(self.input.data)
+ c = c[tril_indices_from(c, -1)]
+
+ return np.mean(c), stats.sem(c)
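+
+
+# A usage sketch (illustrative only, not used by the library): the input is a
+# TimeSeries whose first dimension is the repetition (trial) and whose last
+# dimension is time; the signal estimate is the mean over trials and the
+# noise is each trial's deviation from that mean.
+def _example_snr_usage():
+    trials = ts.TimeSeries(np.random.randn(10, 256), sampling_rate=1.)
+    S = SNRAnalyzer(trials)
+    return S.mt_snr, S.mt_information, S.correlation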
diff --git a/nitime/analysis/spectral.py b/nitime/analysis/spectral.py
new file mode 100644
index 0000000..02b5410
--- /dev/null
+++ b/nitime/analysis/spectral.py
@@ -0,0 +1,660 @@
+
+import numpy as np
+from nitime.lazy import scipy
+from nitime.lazy import scipy_signal as signal
+from nitime.lazy import scipy_fftpack as fftpack
+
+from nitime import descriptors as desc
+from nitime import utils as tsu
+from nitime import algorithms as tsa
+from nitime import timeseries as ts
+
+from .base import BaseAnalyzer
+
+
+class SpectralAnalyzer(BaseAnalyzer):
+ """ Analyzer object for spectral analysis"""
+ def __init__(self, input=None, method=None, BW=None, adaptive=False,
+ low_bias=False):
+ """
+ The initializer for the SpectralAnalyzer.
+
+ Parameters
+ ----------
+ input: time-series objects
+
+ method: dict (optional),
+ The method spec used in calculating 'psd'; see
+ :func:`algorithms.get_spectra` for details.
+
+ BW: float (optional),
+ In 'spectrum_multi_taper', the bandwidth of the windowing function
+ will determine the number of tapers to use. This parameter represents
+ a trade-off between frequency resolution (lower main lobe BW for the
+ taper) and variance reduction (higher BW and number of averaged
+ estimates).
+
+ adaptive : {True/False}
+ In 'spectrum_multi_taper', use an adaptive weighting routine to
+ combine the PSD estimates of different tapers.
+
+ low_bias: {True/False}
+ In spectrum_multi_taper, use bias correction
+
+
+ Examples
+ --------
+ >>> np.set_printoptions(precision=4) # for doctesting
+ >>> t1 = ts.TimeSeries(data = np.arange(0,1024,1).reshape(2,512),
+ ... sampling_rate=np.pi)
+ >>> s1 = SpectralAnalyzer(t1)
+ >>> s1.method['this_method']
+ 'welch'
+ >>> s1.method['Fs'] # doctest: +ELLIPSIS
+ 3.1415926535... Hz
+ >>> f,s = s1.psd
+ >>> f
+ array([ 0. , 0.0491, 0.0982, 0.1473, 0.1963, 0.2454, 0.2945,
+ 0.3436, 0.3927, 0.4418, 0.4909, 0.54 , 0.589 , 0.6381,
+ 0.6872, 0.7363, 0.7854, 0.8345, 0.8836, 0.9327, 0.9817,
+ 1.0308, 1.0799, 1.129 , 1.1781, 1.2272, 1.2763, 1.3254,
+ 1.3744, 1.4235, 1.4726, 1.5217, 1.5708])
+ >>> s[0,0] # doctest: +ELLIPSIS
+ 1128276.92538360...
+ """
+ BaseAnalyzer.__init__(self, input)
+
+ self.method = method
+
+ if self.method is None:
+ self.method = {'this_method': 'welch',
+ 'Fs': self.input.sampling_rate}
+
+ self.BW = BW
+ self.adaptive = adaptive
+ self.low_bias = low_bias
+
+ @desc.setattr_on_read
+ def psd(self):
+ """
+ The standard output for this analyzer is a tuple f,s, where: f is the
+ frequency bands associated with the discrete spectral components
+ and s is the PSD calculated using :func:`mlab.psd`.
+
+ """
+ NFFT = self.method.get('NFFT', 64)
+ Fs = self.input.sampling_rate
+ detrend = self.method.get('detrend', tsa.mlab.detrend_none)
+ window = self.method.get('window', tsa.mlab.window_hanning)
+ n_overlap = self.method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
+
+ if np.iscomplexobj(self.input.data):
+ psd_len = NFFT
+ dt = complex
+ else:
+ psd_len = NFFT / 2.0 + 1
+ dt = float
+
+ #If multi-channel data:
+ if len(self.input.data.shape) > 1:
+ psd_shape = (self.input.shape[:-1] + (psd_len,))
+ flat_data = np.reshape(self.input.data, (-1,
+ self.input.data.shape[-1]))
+ flat_psd = np.empty((flat_data.shape[0], psd_len), dtype=dt)
+ for i in range(flat_data.shape[0]):
+ #'f' are the center frequencies of the frequency bands
+ #represented in the psd. These are identical in each iteration
+ #of the loop, so they get reassigned into the same variable in
+ #each iteration:
+ temp, f = tsa.mlab.psd(flat_data[i],
+ NFFT=NFFT,
+ Fs=Fs,
+ detrend=detrend,
+ window=window,
+ noverlap=n_overlap)
+ flat_psd[i] = temp.squeeze()
+ psd = np.reshape(flat_psd, psd_shape).squeeze()
+
+ else:
+ psd, f = tsa.mlab.psd(self.input.data,
+ NFFT=NFFT,
+ Fs=Fs,
+ detrend=detrend,
+ window=window,
+ noverlap=n_overlap)
+
+ return f, psd
+
+ @desc.setattr_on_read
+ def cpsd(self):
+ """
+ This outputs both the PSD and the CSD calculated using
+ :func:`algorithms.get_spectra`.
+
+ Returns
+ -------
+
+ (f,s): tuple
+ f: Frequency bands over which the psd/csd are calculated and
+ s: the n by n by len(f) matrix of PSD (on the main diagonal) and CSD
+ (off diagonal)
+ """
+ self.welch_method = self.method
+ self.welch_method['this_method'] = 'welch'
+ self.welch_method['Fs'] = self.input.sampling_rate
+ f, spectrum_welch = tsa.get_spectra(self.input.data,
+ method=self.welch_method)
+
+ return f, spectrum_welch
+
+ @desc.setattr_on_read
+ def periodogram(self):
+ """
+
+ This is the power spectrum estimated via the periodogram (the squared
+ magnitude of the FFT) of the time-series
+
+ Returns
+ -------
+ (f,psd): f is an array with the frequencies and psd is the power
+ spectral estimate at each frequency.
+ """
+ return tsa.periodogram(self.input.data,
+ Fs=self.input.sampling_rate)
+
+ @desc.setattr_on_read
+ def spectrum_fourier(self):
+ """
+
+ This is the spectrum estimated as the FFT of the time-series
+
+ Returns
+ -------
+ (f,spectrum): f is an array with the frequencies and spectrum is the
+ complex-valued FFT.
+
+ """
+
+ data = self.input.data
+ sampling_rate = self.input.sampling_rate
+
+ fft = fftpack.fft
+ if np.any(np.iscomplex(data)):
+ # Get negative frequencies, as well as positive:
+ f = np.linspace(-sampling_rate/2., sampling_rate/2., data.shape[-1])
+ spectrum_fourier = np.fft.fftshift(fft(data))
+ else:
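+ # Real-valued data: the spectrum is conjugate-symmetric, so only the
+ # non-negative frequencies returned by get_freqs are retained: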
+ f = tsu.get_freqs(sampling_rate, data.shape[-1])
+ spectrum_fourier = fft(data)[..., :f.shape[0]]
+
+ return f, spectrum_fourier
+
+ @desc.setattr_on_read
+ def spectrum_multi_taper(self):
+ """
+
+ The spectrum, computed channel-by-channel using
+ :func:`multi_taper_psd`
+
+ """
+ if np.iscomplexobj(self.input.data):
+ psd_len = self.input.shape[-1]
+ dt = complex
+ else:
+ psd_len = self.input.shape[-1] // 2 + 1  # int, used as an array dimension
+ dt = float
+
+ #Initialize the output
+ spectrum_multi_taper = np.empty((self.input.shape[:-1] + (psd_len,)),
+ dtype=dt)
+
+ #If multi-channel data:
+ if len(self.input.data.shape) > 1:
+ for i in range(self.input.data.shape[0]):
+ # 'f' are the center frequencies of the frequency bands
+ # represented in the MT psd. These are identical in each
+ # iteration of the loop, so they get reassigned into the same
+ # variable in each iteration:
+ f, spectrum_multi_taper[i], _ = tsa.multi_taper_psd(
+ self.input.data[i],
+ Fs=self.input.sampling_rate,
+ BW=self.BW,
+ adaptive=self.adaptive,
+ low_bias=self.low_bias)
+ else:
+ f, spectrum_multi_taper, _ = tsa.multi_taper_psd(self.input.data,
+ Fs=self.input.sampling_rate,
+ BW=self.BW,
+ adaptive=self.adaptive,
+ low_bias=self.low_bias)
+
+ return f, spectrum_multi_taper
+
+
+class FilterAnalyzer(desc.ResetMixin):
+ """ A class for performing filtering operations on time-series and
+ producing the filtered versions of the time-series
+
+ Parameters
+ ----------
+
+ time_series: A nitime TimeSeries object.
+
+ lb,ub: float (optional)
+ Lower and upper band of a pass-band into which the data will be
+ filtered. Default: 0, Nyquist
+
+ boxcar_iterations: int (optional)
+ For box-car filtering, how many times to iterate over the data while
+ convolving with a box-car function. Default: 2
+
+ gpass: float (optional)
+ For iir filtering, the pass-band maximal ripple loss (default: 1)
+
+ gstop: float (optional)
+ For iir filtering, the stop-band minimal attenuation (default: 60).
+
+ filt_order: int (optional)
+ For iir/fir filtering, the order of the filter. Note for fir filtering,
+ this needs to be an even number. Default: 64
+
+ iir_ftype: str (optional)
+ The type of filter to be used in iir filtering (see
+ scipy.signal.iirdesign for details). Default 'ellip'
+
+ fir_win: str
+ The window to be used in fir filtering (see scipy.signal.firwin for
+ details). Default: 'hamming'
+
+ Note
+ ----
+ All filtering methods used here keep the original DC component of the
+ signal.
+
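+ Examples
+ --------
+ A minimal sketch (``T`` stands for an existing TimeSeries and the band
+ edges are illustrative, given in the same units as the sampling rate)::
+
+ F = FilterAnalyzer(T, lb=0.02, ub=0.15)
+ fir_filtered = F.fir # zero-phase FIR band-pass
+ fourier_filtered = F.filtered_fourier # band-pass in the Fourier domain
+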
+ """
+ def __init__(self, time_series, lb=0, ub=None, boxcar_iterations=2,
+ filt_order=64, gpass=1, gstop=60, iir_ftype='ellip',
+ fir_win='hamming'):
+
+ #Initialize all the local variables you will need for all the different
+ #filtering methods:
+ self.data = time_series.data
+ self.sampling_rate = time_series.sampling_rate
+ self.ub = ub
+ self.lb = lb
+ self.time_unit = time_series.time_unit
+ self._boxcar_iterations = boxcar_iterations
+ self._gstop = gstop
+ self._gpass = gpass
+ self._filt_order = filt_order
+ self._ftype = iir_ftype
+ self._win = fir_win
+
+ def filtfilt(self, b, a, in_ts=None):
+
+ """
+ Zero-phase delay filtering (either iir or fir).
+
+ Parameters
+ ----------
+
+ a,b: filter coefficients
+
+ in_ts: time-series object.
+ This allows replacing the input: instead of analyzing this
+ analyzer's input data, analyze some other time-series object.
+
+ Note
+ ----
+
+ This is a wrapper around scipy.signal.filtfilt
+
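+ A sketch of typical use (the Butterworth design below is illustrative,
+ not part of this class)::
+
+ b, a = signal.butter(4, 0.2) # 4th-order low-pass at 0.2 * Nyquist
+ filtered_ts = F.filtfilt(b, a) # F is a FilterAnalyzer instance
+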
+ """
+ # Switch in the new in_ts:
+ if in_ts is not None:
+ data = in_ts.data
+ Fs = in_ts.sampling_rate
+ else:
+ data = self.data
+ Fs = self.sampling_rate
+
+ #filtfilt only operates channel-by-channel, so we need to loop over the
+ #channels, if the data is multi-channel data:
+ if len(data.shape) > 1:
+ out_data = np.empty(data.shape, dtype=data.dtype)
+ for i in range(data.shape[0]):
+ out_data[i] = signal.filtfilt(b, a, data[i])
+ #Make sure to preserve the DC:
+ dc = np.mean(data[i])
+ out_data[i] -= np.mean(out_data[i])
+ out_data[i] += dc
+ else:
+ out_data = signal.filtfilt(b, a, data)
+ #Make sure to preserve the DC:
+ dc = np.mean(data)
+ out_data -= np.mean(out_data)
+ out_data += dc
+
+ return ts.TimeSeries(out_data,
+ sampling_rate=Fs,
+ time_unit=self.time_unit)
+
+ @desc.setattr_on_read
+ def fir(self):
+ """
+ Filter the time-series using an FIR digital filter. Filtering is done
+ back and forth (using scipy.signal.filtfilt) to achieve zero phase
+ delay
+ """
+ #Passband and stop-band are expressed as fraction of the Nyquist
+ #frequency:
+ if self.ub is not None:
+ ub_frac = self.ub / (self.sampling_rate / 2.)
+ else:
+ ub_frac = 1.0
+
+ lb_frac = self.lb / (self.sampling_rate / 2.)
+
+ if lb_frac < 0 or ub_frac > 1:
+ e_s = "The lower-bound or upper bound used to filter"
+ e_s += " are beyond the range 0-Nyquist. You asked for"
+ e_s += " a filter between"
+ e_s += "%s and %s percent of" % (lb_frac * 100, ub_frac * 100)
+ e_s += "the Nyquist frequency"
+ raise ValueError(e_s)
+
+ n_taps = self._filt_order + 1
+
+ #The filter order must be small enough that the filter (n_taps
+ #coefficients) is shorter than a 1/3 of the time-series, as required by
+ #the padding that scipy.signal.filtfilt applies:
+ if n_taps * 3 > self.data.shape[-1]:
+ e_s = "The filter order chosen is too large for this time-series"
+ raise ValueError(e_s)
+
+ # a is always 1:
+ a = [1]
+
+ sig = ts.TimeSeries(data=self.data, sampling_rate=self.sampling_rate)
+
+ #Lowpass:
+ if ub_frac < 1:
+ b = signal.firwin(n_taps, ub_frac, window=self._win)
+ sig = self.filtfilt(b, a, sig)
+
+ #High-pass
+ if lb_frac > 0:
+ #Design a low-pass at lb_frac and convert it to a high-pass by spectral
+ #inversion (negate the taps and add 1 to the center tap):
+ b = -1 * signal.firwin(n_taps, lb_frac, window=self._win)
+ b[n_taps // 2] = b[n_taps // 2] + 1
+ sig = self.filtfilt(b, a, sig)
+
+ return sig
+
+ @desc.setattr_on_read
+ def iir(self):
+ """
+ Filter the time-series using an IIR filter. Filtering is done back and
+ forth (using scipy.signal.filtfilt) to achieve zero phase delay
+
+ """
+
+ #Passband and stop-band are expressed as fraction of the Nyquist
+ #frequency:
+ if self.ub is not None:
+ ub_frac = self.ub / (self.sampling_rate / 2.)
+ else:
+ ub_frac = 1.0
+
+ lb_frac = self.lb / (self.sampling_rate / 2.)
+
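+ # The stop-band edges (ws) below are placed 0.1 (in Nyquist-normalized
+ # units) outside the pass-band edges (wp), kept within the (0, 1) range: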
+ # For the band-pass:
+ if lb_frac > 0 and ub_frac < 1:
+
+ wp = [lb_frac, ub_frac]
+
+ ws = [np.max([lb_frac - 0.1, 0]),
+ np.min([ub_frac + 0.1, 1.0])]
+
+ # For the lowpass:
+ elif lb_frac == 0:
+ wp = ub_frac
+ ws = np.min([ub_frac + 0.1, 0.9])
+
+ # For the highpass:
+ elif ub_frac == 1:
+ wp = lb_frac
+ ws = np.max([lb_frac - 0.1, 0.1])
+
+ b, a = signal.iirdesign(wp, ws, self._gpass, self._gstop,
+ ftype=self._ftype)
+
+ return self.filtfilt(b, a)
+
+ @desc.setattr_on_read
+ def filtered_fourier(self):
+ """
+
+ Filter the time-series by passing it to the Fourier domain and null
+ out the frequency bands outside of the range [lb,ub]
+
+ """
+
+ freqs = tsu.get_freqs(self.sampling_rate, self.data.shape[-1])
+
+ if self.ub is None:
+ self.ub = freqs[-1]
+
+ power = fftpack.fft(self.data)
+ idx_0 = np.hstack([np.where(freqs < self.lb)[0],
+ np.where(freqs > self.ub)[0]])
+
+ #Make sure that you keep the DC component:
+ keep_dc = np.copy(power[..., 0])
+ power[..., idx_0] = 0
+ power[..., -1 * idx_0] = 0 # Take care of the negative frequencies
+ power[..., 0] = keep_dc # And put the DC back in when you're done:
+
+ data_out = fftpack.ifft(power)
+
+ data_out = np.real(data_out) # In order to make sure that you are not
+ # left with float-precision residual
+ # complex parts
+
+ return ts.TimeSeries(data=data_out,
+ sampling_rate=self.sampling_rate,
+ time_unit=self.time_unit)
+
+ @desc.setattr_on_read
+ def filtered_boxcar(self):
+ """
+ Filter the time-series by a boxcar filter.
+
+ The low pass filter is implemented by convolving with a boxcar function
+ of the right length and amplitude and the high-pass filter is
+ implemented by subtracting a low-pass version (as above) from the
+ signal
+ """
+
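+ # Note that the cutoffs handed to boxcar_filter are normalized by the
+ # sampling rate (not by the Nyquist frequency, as in fir/iir):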
+ if self.ub is not None:
+ ub = self.ub / self.sampling_rate
+ else:
+ ub = 1.0
+
+ lb = self.lb / self.sampling_rate
+
+ data_out = tsa.boxcar_filter(np.copy(self.data),
+ lb=lb, ub=ub,
+ n_iterations=self._boxcar_iterations)
+
+ return ts.TimeSeries(data=data_out,
+ sampling_rate=self.sampling_rate,
+ time_unit=self.time_unit)
+
+
+class HilbertAnalyzer(BaseAnalyzer):
+
+ """Analyzer class for extracting the Hilbert transform """
+
+ def __init__(self, input=None):
+ """Constructor function for the Hilbert analyzer class.
+
+ Parameters
+ ----------
+
+ input: TimeSeries
+
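+ Examples
+ --------
+ A sketch (``T`` stands for an existing TimeSeries)::
+
+ H = HilbertAnalyzer(T)
+ envelope = H.amplitude # instantaneous amplitude
+ inst_phase = H.phase # instantaneous phase
+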
+ """
+ BaseAnalyzer.__init__(self, input)
+
+ @desc.setattr_on_read
+ def analytic(self):
+ """The natural output for this analyzer is the analytic signal """
+ data = self.input.data
+ sampling_rate = self.input.sampling_rate
+ #If you have scipy with the fixed scipy.signal.hilbert (r6205 and
+ #later). Compare the version numerically, since a string comparison
+ #would mis-order e.g. '0.10' and '0.9':
+ if tuple(int(x) for x in scipy.__version__.split('.')[:2]) >= (0, 9):
+ hilbert = signal.hilbert
+ else:
+ hilbert = tsu.hilbert_from_new_scipy
+
+ return ts.TimeSeries(data=hilbert(data),
+ sampling_rate=sampling_rate)
+
+ @desc.setattr_on_read
+ def amplitude(self):
+ return ts.TimeSeries(data=np.abs(self.analytic.data),
+ sampling_rate=self.analytic.sampling_rate)
+
+ @desc.setattr_on_read
+ def phase(self):
+ return ts.TimeSeries(data=np.angle(self.analytic.data),
+ sampling_rate=self.analytic.sampling_rate)
+
+ @desc.setattr_on_read
+ def real(self):
+ return ts.TimeSeries(data=self.analytic.data.real,
+ sampling_rate=self.analytic.sampling_rate)
+
+ @desc.setattr_on_read
+ def imag(self):
+ return ts.TimeSeries(data=self.analytic.data.imag,
+ sampling_rate=self.analytic.sampling_rate)
+
+
+class MorletWaveletAnalyzer(BaseAnalyzer):
+
+ """Analyzer class for extracting the (complex) Morlet wavelet transform """
+
+ def __init__(self, input=None, freqs=None, sd_rel=.2, sd=None, f_min=None,
+ f_max=None, nfreqs=None, log_spacing=False, log_morlet=False):
+ """Constructor function for the Wavelet analyzer class.
+
+ Parameters
+ ----------
+
+ freqs: list or float
+ List of center frequencies for the wavelet transform, or a scalar
+ for a single band-passed signal.
+
+ sd: list or float
+ List of filter bandwidths, given as standard-deviation of center
+ frequencies. Alternatively sd_rel can be specified.
+
+ sd_rel: float
+ Filter bandwidth, given as a fraction of the center frequencies.
+
+ f_min: float
+ Minimal frequency.
+
+ f_max: float
+ Maximal frequency.
+
+ nfreqs: int
+ Number of frequencies.
+
+ log_spacing: bool
+ If true, frequencies will be evenly spaced on a log-scale.
+ Default: False
+
+ log_morlet: bool
+ If True, a log-Morlet wavelet is used, if False, a regular Morlet
+ wavelet is used. Default: False
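+
+ Examples
+ --------
+ A sketch (``T`` stands for an existing TimeSeries; the frequency grid is
+ illustrative only)::
+
+ W = MorletWaveletAnalyzer(T, f_min=2., f_max=40., nfreqs=20,
+ log_spacing=True)
+ power = W.amplitude # one row of wavelet amplitude per frequency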
+ """
+ BaseAnalyzer.__init__(self, input)
+ self.freqs = freqs
+ self.sd_rel = sd_rel
+ self.sd = sd
+ self.f_min = f_min
+ self.f_max = f_max
+ self.nfreqs = nfreqs
+ self.log_spacing = log_spacing
+ self.log_morlet = log_morlet
+
+ if log_morlet:
+ self.wavelet = tsa.wlogmorlet
+ else:
+ self.wavelet = tsa.wmorlet
+
+ if freqs is not None:
+ self.freqs = np.array(freqs)
+ elif f_min is not None and f_max is not None and nfreqs is not None:
+ if log_spacing:
+ self.freqs = np.logspace(np.log10(f_min), np.log10(f_max),
+ num=nfreqs, endpoint=True)
+ else:
+ self.freqs = np.linspace(f_min, f_max, num=nfreqs,
+ endpoint=True)
+ else:
+ raise NotImplementedError
+
+ if sd is None:
+ self.sd = self.freqs * self.sd_rel
+
+ @desc.setattr_on_read
+ def analytic(self):
+ """The natural output for this analyzer is the analytic signal"""
+ data = self.input.data
+ sampling_rate = self.input.sampling_rate
+
+ a_signal =\
+ ts.TimeSeries(data=np.zeros(self.freqs.shape + data.shape,
+ dtype='D'), sampling_rate=sampling_rate)
+ if self.freqs.ndim == 0:
+ w = self.wavelet(self.freqs, self.sd,
+ sampling_rate=sampling_rate, ns=5,
+ normed='area')
+
+ # nd = (w.shape[0] - 1) / 2
+ a_signal.data[...] = (np.convolve(data, np.real(w), mode='same') +
+ 1j * np.convolve(data, np.imag(w), mode='same'))
+ else:
+ for i, (f, sd) in enumerate(zip(self.freqs, self.sd)):
+ w = self.wavelet(f, sd, sampling_rate=sampling_rate,
+ ns=5, normed='area')
+
+ # nd = (w.shape[0] - 1) / 2
+ a_signal.data[i, ...] = (
+ np.convolve(data, np.real(w), mode='same') +
+ 1j * np.convolve(data, np.imag(w), mode='same'))
+
+ return a_signal
+
+ @desc.setattr_on_read
+ def amplitude(self):
+ return ts.TimeSeries(data=np.abs(self.analytic.data),
+ sampling_rate=self.analytic.sampling_rate)
+
+ @desc.setattr_on_read
+ def phase(self):
+ return ts.TimeSeries(data=np.angle(self.analytic.data),
+ sampling_rate=self.analytic.sampling_rate)
+
+ @desc.setattr_on_read
+ def real(self):
+ return ts.TimeSeries(data=self.analytic.data.real,
+ sampling_rate=self.analytic.sampling_rate)
+
+ @desc.setattr_on_read
+ def imag(self):
+ return ts.TimeSeries(data=self.analytic.data.imag,
+ sampling_rate=self.analytic.sampling_rate)
diff --git a/nitime/analysis/tests/__init__.py b/nitime/analysis/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/nitime/analysis/tests/__init__.py
diff --git a/nitime/analysis/tests/test_base.py b/nitime/analysis/tests/test_base.py
new file mode 100644
index 0000000..4784859
--- /dev/null
+++ b/nitime/analysis/tests/test_base.py
@@ -0,0 +1,21 @@
+
+from nitime.analysis.base import BaseAnalyzer
+import numpy.testing as npt
+
+
+def test_base():
+ """Testing BaseAnalyzer"""
+
+ empty_dict = {}
+ input1 = '123'
+ A = BaseAnalyzer(input=input1)
+
+ npt.assert_equal(A.input, input1)
+ npt.assert_equal(A.parameters, empty_dict)
+
+ input2 = '456'
+ A.set_input(input2)
+
+ npt.assert_equal(A.input, input2)
+
+ npt.assert_equal(A.__repr__(), 'BaseAnalyzer()')
diff --git a/nitime/analysis/tests/test_coherence.py b/nitime/analysis/tests/test_coherence.py
new file mode 100644
index 0000000..ff528c8
--- /dev/null
+++ b/nitime/analysis/tests/test_coherence.py
@@ -0,0 +1,210 @@
+import warnings
+
+import numpy as np
+import numpy.testing as npt
+import matplotlib
+import matplotlib.mlab as mlab
+
+import nitime.timeseries as ts
+import nitime.analysis as nta
+
+import platform
+
+# Some tests might require python version 2.5 or above:
+if float(platform.python_version()[:3]) < 2.5:
+ old_python = True
+else:
+ old_python = False
+
+ # Matplotlib older than 0.99 will have some issues with the normalization of
+ # the PSD
+
+if float(matplotlib.__version__[:3]) < 0.99:
+ w_s = "You have a relatively old version of Matplotlib. "
+ w_s += " Estimation of the PSD DC component might not be as expected"
+ w_s += " Consider updating Matplotlib: http://matplotlib.sourceforge.net/"
+ warnings.warn(w_s, Warning)
+ old_mpl = True
+else:
+ old_mpl = False
+
+def test_CoherenceAnalyzer():
+ methods = (None,
+ {"this_method": 'welch', "NFFT": 256},
+ {"this_method": 'multi_taper_csd'},
+ {"this_method": 'periodogram_csd', "NFFT": 256})
+
+ Fs = np.pi
+ t = np.arange(1024)
+ x = np.sin(10 * t) + np.random.rand(t.shape[-1])
+ y = np.sin(10 * t) + np.random.rand(t.shape[-1])
+ # Third time-series used for calculation of partial coherence:
+ z = np.sin(10 * t)
+ T = ts.TimeSeries(np.vstack([x, y, z]), sampling_rate=np.pi)
+ n_series = T.shape[0]
+ for unwrap in [True, False]:
+ for method in methods:
+ C = nta.CoherenceAnalyzer(T, method, unwrap_phases=unwrap)
+ if method is None:
+ # This is the default behavior (grab the NFFT from the number
+ # of frequencies):
+ npt.assert_equal(C.coherence.shape, (n_series, n_series,
+ C.frequencies.shape[0]))
+
+ elif (method['this_method'] == 'welch' or
+ method['this_method'] == 'periodogram_csd'):
+ npt.assert_equal(C.coherence.shape, (n_series, n_series,
+ method['NFFT'] // 2 + 1))
+ else:
+ npt.assert_equal(C.coherence.shape, (n_series, n_series,
+ len(t) // 2 + 1))
+
+ # Coherence symmetry:
+ npt.assert_equal(C.coherence[0, 1], C.coherence[1, 0])
+
+ # Phase/delay asymmetry:
+ npt.assert_equal(C.phase[0, 1], -1 * C.phase[1, 0])
+
+ # The very first one is a nan, test from second and onwards:
+ npt.assert_almost_equal(C.delay[0, 1][1:], -1 * C.delay[1, 0][1:])
+
+ if method is not None and method['this_method'] == 'welch':
+ S = nta.SpectralAnalyzer(T, method)
+ npt.assert_almost_equal(S.cpsd[0], C.frequencies)
+ npt.assert_almost_equal(S.cpsd[1], C.spectrum)
+ # Test that partial coherence runs through and has the right number
+ # of dimensions:
+ npt.assert_equal(len(C.coherence_partial.shape), 4)
+
+
+@npt.dec.skipif(old_mpl)
+def tes