-rw-r--r--  .git_archival.txt | 4
-rw-r--r--  .gitattributes | 1
-rw-r--r--  .github/workflows/test.yml | 49
-rw-r--r--  .github/workflows/wheels.yml | 143
-rw-r--r--  .gitignore | 14
-rw-r--r--  .travis.yml | 33
-rw-r--r--  MANIFEST.in | 18
-rw-r--r--  README.txt | 4
-rw-r--r--  appveyor.yml | 39
-rw-r--r--  debian/blends | 9
-rw-r--r--  debian/changelog | 190
-rw-r--r--  debian/compat | 1
-rw-r--r--  debian/control | 62
-rw-r--r--  debian/copyright | 28
-rw-r--r--  debian/gbp.conf | 18
-rw-r--r--  debian/patches/deb_no_sources_for_docs | 8
-rw-r--r--  debian/patches/demote-cython.patch | 20
-rw-r--r--  debian/patches/matplotlib.patch | 14
-rw-r--r--  debian/patches/numpydoc_1.24.patch | 18
-rw-r--r--  debian/patches/python3 | 97
-rw-r--r--  debian/patches/series | 7
-rw-r--r--  debian/patches/sphinx-conf-fix.patch | 28
-rw-r--r--  debian/patches/sphinx_ignore_github.patch | 15
-rw-r--r--  debian/patches/up_version_info_python2.6 | 22
-rw-r--r--  debian/python-nitime-doc.lintian-overrides | 2
-rw-r--r--  debian/python3-nitime.install (renamed from debian/python-nitime.install) | 0
-rwxr-xr-x  debian/rules | 35
-rw-r--r--  debian/salsa-ci.yml | 4
-rw-r--r--  debian/upstream/metadata | 7
-rw-r--r--  debian/watch | 6
-rw-r--r--  doc/Makefile | 42
-rw-r--r--  doc/conf.py | 45
-rw-r--r--  doc/discussion/base_classes.rst | 6
-rw-r--r--  doc/discussion/interval_object.rst | 2
-rw-r--r--  doc/discussion/time_series_access.rst | 2
-rw-r--r--  doc/examples/.gitignore | 1
-rw-r--r-- [-rwxr-xr-x]  doc/examples/ar_est_1var.py | 0
-rw-r--r--  doc/examples/ar_est_2vars.py | 19
-rw-r--r--  doc/examples/ar_est_3vars.py | 8
-rw-r--r--  doc/examples/ar_model_fit.py | 17
-rw-r--r--  doc/examples/event_related_fmri.py | 17
-rw-r--r--  doc/examples/filtering_fmri.py | 4
-rw-r--r--  doc/examples/granger_fmri.py | 17
-rw-r--r--  doc/examples/grasshopper.py | 2
-rw-r--r--  doc/examples/mtm_baseband_power.py | 17
-rw-r--r--  doc/examples/mtm_harmonic_test.py | 26
-rw-r--r-- [-rwxr-xr-x]  doc/examples/multi_taper_coh.py | 43
-rw-r--r--  doc/examples/multi_taper_spectral_estimation.py | 36
-rw-r--r--  doc/examples/resting_state_fmri.py | 52
-rw-r--r--  doc/examples/snr_example.py | 36
-rw-r--r--  doc/index.rst | 4
-rw-r--r--  doc/links_names.txt | 10
-rw-r--r--  doc/news.rst | 4
-rw-r--r--  doc/sphinxext/docscrape.py | 47
-rw-r--r--  doc/sphinxext/github.py | 14
-rw-r--r--  doc/sphinxext/numpydoc.py | 6
-rw-r--r--  doc/sphinxext/only_directives.py | 4
-rw-r--r--  doc/sphinxext/plot_directive.py | 489
-rwxr-xr-x  doc/upload-gh-pages.sh | 37
-rw-r--r--  doc/users/install.rst | 38
-rw-r--r--  doc/users/overview.rst | 8
-rw-r--r--  doc/whatsnew/index.rst | 4
-rw-r--r--  doc/whatsnew/version0.6.rst | 57
-rw-r--r--  doc/whatsnew/version0.7.rst | 67
-rw-r--r--  min-requirements.txt | 8
-rw-r--r--  nitime/__init__.py | 5
-rw-r--r--  nitime/_mpl_units.py | 2
-rw-r--r--  nitime/algorithms/__init__.py | 9
-rw-r--r--  nitime/algorithms/autoregressive.py | 2
-rw-r--r--  nitime/algorithms/cohere.py | 119
-rw-r--r--  nitime/algorithms/entropy.py | 107
-rw-r--r--  nitime/algorithms/event_related.py | 19
-rw-r--r--  nitime/algorithms/filter.py | 30
-rw-r--r--  nitime/algorithms/spectral.py | 239
-rw-r--r--  nitime/algorithms/tests/test_autoregressive.py | 6
-rw-r--r--  nitime/algorithms/tests/test_coherence.py | 58
-rw-r--r--  nitime/algorithms/tests/test_entropy.py | 49
-rw-r--r--  nitime/algorithms/tests/test_spectral.py | 69
-rw-r--r--  nitime/analysis/base.py | 4
-rw-r--r--  nitime/analysis/coherence.py | 18
-rw-r--r--  nitime/analysis/correlation.py | 4
-rw-r--r--  nitime/analysis/event_related.py | 30
-rw-r--r--  nitime/analysis/granger.py | 4
-rw-r--r--  nitime/analysis/snr.py | 4
-rw-r--r--  nitime/analysis/spectral.py | 76
-rw-r--r--  nitime/analysis/tests/test_coherence.py | 21
-rw-r--r--  nitime/analysis/tests/test_granger.py | 7
-rw-r--r-- [-rwxr-xr-x]  nitime/data/fmri_timeseries.csv | 0
-rw-r--r-- [-rwxr-xr-x]  nitime/data/grasshopper_spike_times1.txt | 0
-rw-r--r-- [-rwxr-xr-x]  nitime/data/grasshopper_spike_times2.txt | 0
-rw-r--r-- [-rwxr-xr-x]  nitime/data/grasshopper_stimulus1.txt | 0
-rw-r--r-- [-rwxr-xr-x]  nitime/data/grasshopper_stimulus2.txt | 0
-rw-r--r--  nitime/fmri/hrf.py | 2
-rw-r--r--  nitime/fmri/io.py | 4
-rw-r--r--  nitime/fmri/tests/test_io.py | 5
-rw-r--r--  nitime/six.py | 585
-rw-r--r--  nitime/testlib.py | 130
-rw-r--r--  nitime/tests/test_algorithms.py | 24
-rw-r--r--  nitime/tests/test_analysis.py | 42
-rw-r--r--  nitime/tests/test_descriptors.py | 12
-rw-r--r--  nitime/tests/test_lazy.py | 26
-rw-r--r--  nitime/tests/test_timeseries.py | 114
-rw-r--r--  nitime/tests/test_utils.py | 40
-rw-r--r--  nitime/tests/test_viz.py | 11
-rw-r--r--  nitime/timeseries.py | 104
-rw-r--r--  nitime/utils.py | 310
-rw-r--r--  nitime/version.py | 109
-rw-r--r--  nitime/viz.py | 404
-rw-r--r--  pyproject.toml | 63
-rw-r--r--  requirements-dev.txt | 6
-rw-r--r--  requirements.txt | 8
-rwxr-xr-x  setup.py | 71
-rw-r--r--  setup_egg.py | 2
-rw-r--r--  tools/apigen.py | 24
-rwxr-xr-x  tools/build_modref_templates.py | 4
-rwxr-xr-x  tools/build_release | 2
-rwxr-xr-x  tools/ex2rst | 4
-rwxr-xr-x  tools/github_stats.py | 14
-rwxr-xr-x  tools/gitwash_dumper.py | 4
-rwxr-xr-x  tools/make_examples.py | 43
-rwxr-xr-x  tools/release | 2
-rwxr-xr-x  tools/sneeze.py | 50
-rwxr-xr-x  tools/update_requirements.py | 36
123 files changed, 2347 insertions, 2875 deletions
diff --git a/.git_archival.txt b/.git_archival.txt
new file mode 100644
index 0000000..702e7ab
--- /dev/null
+++ b/.git_archival.txt
@@ -0,0 +1,4 @@
+node: 1dc1d22c239664e2cba9756f61d86bd7467d5d5a
+node-date: 2023-10-02T11:16:01-07:00
+describe-name: 0.10.2
+ref-names: HEAD -> master, tag: 0.10.2
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..00a7b00
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+.git_archival.txt export-subst
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..a91458d
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,49 @@
+name: Test suite
+
+on:
+ push:
+ branches:
+ - master
+ tags:
+ - "*"
+ pull_request:
+ branches:
+ - master
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+
+jobs:
+ test:
+
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
+ requires: ['requirements.txt']
+ include:
+ - python-version: '3.8'
+ requires: 'min-requirements.txt'
+
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v3
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ allow-prereleases: true
+ - name: Install
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install -r ${{ matrix.requires }}
+ python -m pip install -r requirements-dev.txt
+ python -m pip install .
+ - name: Lint
+ run: |
+ pipx run flake8 --ignore N802,N806,W504 --select W503 nitime/ tools/
+ - name: Test
+ run: |
+ mkdir ~/for_test && cd ~/for_test && pytest --pyargs nitime --cov-report term-missing --cov=nitime
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
new file mode 100644
index 0000000..1838751
--- /dev/null
+++ b/.github/workflows/wheels.yml
@@ -0,0 +1,143 @@
+name: Build
+
+on:
+ push:
+ branches:
+ - master
+ tags:
+ - "*"
+ pull_request:
+ branches:
+ - master
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ job_metadata:
+ runs-on: ubuntu-latest
+ outputs:
+ commit_message: ${{ steps.get_commit_message.outputs.commit_message }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 2
+ - name: Print head git commit message
+ id: get_commit_message
+ run: |
+ if [[ -z "$COMMIT_MSG" ]]; then
+ COMMIT_MSG=$(git show -s --format=%s $REF)
+ fi
+ echo commit_message=$COMMIT_MSG | tee -a $GITHUB_OUTPUT
+ env:
+ COMMIT_MSG: ${{ github.event.head_commit.message }}
+ REF: ${{ github.event.pull_request.head.sha }}
+
+ build-sdist:
+ name: Build sdist
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ - name: Build sdist
+ run: pipx run build -s
+ - uses: actions/upload-artifact@v3
+ with:
+ name: sdist
+ path: ./dist/*.tar.gz
+
+ build-wheel:
+ name: Build wheel for ${{ matrix.python }}-${{ matrix.buildplat[1] }}
+ needs: [job_metadata]
+ runs-on: ${{ matrix.buildplat[0] }}
+ if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') || contains(needs.job_metadata.outputs.commit_message, '[build wheels]')
+ strategy:
+ fail-fast: false
+ matrix:
+ buildplat:
+ - [ubuntu-20.04, musllinux_x86_64]
+ - [macos-12, macosx_*]
+ - [windows-2019, win_amd64]
+ python: ["cp38", "cp39", "cp310", "cp311", "cp312"]
+ include:
+ # Manylinux builds are cheap, do all in one
+ - { buildplat: ["ubuntu-20.04", "manylinux_x86_64"], python: "*" }
+
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - uses: actions/setup-python@v3
+
+ - name: Update pip/pipx
+ run: pip install --upgrade pip pipx
+
+ - name: Build wheel(s)
+ run: pipx run --spec "cibuildwheel>=2.15" cibuildwheel
+ env:
+ CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }}
+
+ - uses: actions/upload-artifact@v3
+ with:
+ name: ${{ matrix.python == '*' && 'all' || matrix.python }}-${{ startsWith(matrix.buildplat[1], 'macosx') && 'macosx' || matrix.buildplat[1] }}
+ path: ./wheelhouse/*.whl
+
+ test-sdist:
+ name: Test sdist
+ needs: [build-sdist]
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/download-artifact@v3
+ with:
+ name: sdist
+ path: ./dist
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.11"
+ - name: Display Python version
+ run: python -c "import sys; print(sys.version)"
+ - name: Install sdist
+ run: pip install dist/*.tar.gz
+ - run: python -c 'import nitime; print(nitime.__version__)'
+ - name: Install pytest
+ run: pip install pytest
+ - name: Run tests
+ run: pytest -v --pyargs nitime
+
+ pre-publish:
+ runs-on: ubuntu-latest
+ needs: [test-sdist, build-wheel]
+ steps:
+ - uses: actions/download-artifact@v3
+ with:
+ path: dist/
+ - name: Check artifacts
+ run: ls -lR
+ - name: Consolidate and re-check
+ run: |
+ mv dist/*/*.{tar.gz,whl} dist
+ rmdir dist/*/
+ ls -lR
+ - run: pipx run twine check dist/*
+
+ publish:
+ runs-on: ubuntu-latest
+ environment: "Package deployment"
+ needs: [pre-publish]
+ if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
+ steps:
+ - uses: actions/download-artifact@v3
+ with:
+ path: dist/
+ - name: Consolidate artifacts
+ run: |
+ mv dist/*/*.{tar.gz,whl} dist
+ rmdir dist/*/
+ - uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ user: __token__
+ password: ${{ secrets.PYPI_API_TOKEN }}
diff --git a/.gitignore b/.gitignore
index f3d74a9..19f336c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,16 @@
+# Compiled python files
*.pyc
+
+# Editor swap files
*~
+.*.swp
+
+# Build artifacts
+*.so
+*.c
+*.egg-info/
+build/
+dist/
+
+# setuptools_scm
+nitime/_version.py
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index f83e536..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# vim ft=yaml
-# travis-ci.org definition for nipy build
-#
-# We pretend to be erlang because we need can't use the python support in
-# travis-ci; it uses virtualenvs, they do not have numpy, scipy, matplotlib,
-# and it is impractical to build them
-language: erlang
-env:
- - PYTHON=python PYSUF=''
- - PYTHON=python3 PYSUF='3'
-install:
- - sudo add-apt-repository -y ppa:takluyver/python3
- - sudo add-apt-repository -y ppa:chris-lea/cython
- - sudo apt-get update
- - sudo apt-get install $PYTHON-dev $PYTHON-numpy $PYTHON-scipy $PYTHON-matplotlib $PYTHON-setuptools $PYTHON-nose
- - sudo easy_install$PYSUF nibabel networkx # Latest pypi
- ## Cython easy_install breaks with error about refnanny.c; maybe something
- ## to do with having a previous cython version;
- ## http://mail.python.org/pipermail//cython-devel/2012-April/002344.html
- ## (for now, we are using chris-lea's PPA instead of installing manually)
- #- curl -O http://www.cython.org/release/Cython-0.18.zip
- #- unzip Cython-0.18.zip
- #- cd Cython-0.18
- #- sudo python$PYSUF setup.py install
- #- cd ..
- # NITIME:
- - $PYTHON setup.py build
- - sudo $PYTHON setup.py install
-script:
- # Change into an innocuous directory and find tests from installation
- - mkdir for_test
- - cd for_test
- - nosetests$PYSUF --with-doctest `$PYTHON -c "import os; import nitime; print(os.path.dirname(nitime.__file__))"`
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 4911aa3..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,18 +0,0 @@
-include *.txt *.py
-include THANKS
-include LICENSE
-include INSTALL
-
-graft nitime
-graft doc
-graft tools
-
-# docs subdirs we want to skip
-prune doc/_build
-prune doc/api/generated
-
-global-exclude *~
-global-exclude *.flc
-global-exclude *.pyc
-global-exclude .dircopy.log
-global-exclude .git
diff --git a/README.txt b/README.txt
index fbe77ce..a3450d0 100644
--- a/README.txt
+++ b/README.txt
@@ -21,7 +21,7 @@ Mailing Lists
Please see the developer's list here::
- http://mail.scipy.org/mailman/listinfo/nipy-devel
+ https://mail.python.org/mailman/listinfo/neuroimaging
Code
====
@@ -48,5 +48,5 @@ for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
-Copyright (c) 2006-2011, NIPY Developers
+Copyright (c) 2006-2023, NIPY Developers
All rights reserved.
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..2d0b734
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,39 @@
+build: false
+
+environment:
+ matrix:
+ - PYTHON: "C:\\Python27"
+ PYTHON_VERSION: "2.7.10"
+ PYTHON_ARCH: "32"
+ MINICONDA: C:\Miniconda
+
+ - PYTHON: "C:\\Python33"
+ PYTHON_VERSION: "3.3.5"
+ PYTHON_ARCH: "32"
+ MINICONDA: C:\Miniconda3
+
+ - PYTHON: "C:\\Python34"
+ PYTHON_VERSION: "3.4.3"
+ PYTHON_ARCH: "32"
+ MINICONDA: C:\Miniconda3
+
+ - PYTHON: "C:\\Python35"
+ PYTHON_VERSION: "3.5.1"
+ PYTHON_ARCH: "32"
+ MINICONDA: C:\Miniconda35
+
+init:
+ - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH% %MINICONDA%"
+
+install:
+ - "set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH%"
+ - conda config --set always_yes yes --set changeps1 no
+ - conda update -q conda
+ - conda info -a
+ - "conda create -q -n test-environment python=%PYTHON_VERSION% cython numpy scipy matplotlib networkx pytest"
+ - activate test-environment
+ - pip install coverage nibabel
+ - python setup.py build_ext --inplace
+
+test_script:
+ - py.test --pyargs nitime
diff --git a/debian/blends b/debian/blends
deleted file mode 100644
index de60ff9..0000000
--- a/debian/blends
+++ /dev/null
@@ -1,9 +0,0 @@
-Format: extended
-Tasks: debian-med/imaging-dev
-Depends: python-nitime
-Language: Python
-
-Tasks: debian-med/imaging
-Recommends: python-nitime
-Why: Although listed in -dev task, it also has a strong focus on interactive
- data analysis.
diff --git a/debian/changelog b/debian/changelog
index 38940f6..66c3377 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,4 +1,190 @@
-nitime (0.5-2) UNRELEASED; urgency=medium
+nitime (0.10.2-1) unstable; urgency=medium
+
+ [ Andreas Tille ]
+ * New upstream version (Closes: #1058416)
+ * Build-Depends: s/dh-python/dh-sequence-python3/ (routine-update)
+ * dh_clean needs more adjustments
+
+ [ Étienne Mollier ]
+ * d/control: build depends on python3-setuptools-scm.
+ * sphinx-conf-fix.patch: new: fix conf.py.
+ This is necessary as doc/conf.py still relies on the old
+ nitime/version.py, which has been replaced by nitime/_version.py,
+ causing various issues when trying to build python-nitime-doc.
+ * d/python-nitime-doc.lintian-overrides: refresh.
+ This fixes a mismatched override about embedded javascript.
+ * d/control: depend on pyproject backend.
+ * demote-cython.patch: new: do not depend on cython3. (Closes: #1057996)
+ * d/control: add myself to uploaders.
+
+ -- Étienne Mollier <emollier@debian.org> Thu, 14 Dec 2023 22:51:37 +0100
+
+nitime (0.10.1-1) unstable; urgency=medium
+
+ [ Andreas Tille ]
+ * Drop debian/blends which is unused
+
+ [ Étienne Mollier ]
+ * New upstream version 0.10.1 (Closes: #1042247)
+ * do-not-set-lowerbound-zero-in-iir.patch: delete: fixed upstream.
+ * fix-psd-test.patch: delete: fixed upstream.
+ * numpy_1.24.patch: remove: applied upstream.
+
+ [ Nilesh Patra ]
+ * Use deprecated networkx function for now, as the drop-in replacement
+ upstream used is causing a regression.
+ Proper Fix suggested upstream already.
+ * Add patch to fix doc FTBFS with new matplotlib
+
+ -- Nilesh Patra <nilesh@debian.org> Fri, 25 Aug 2023 01:55:48 +0530
+
+nitime (0.9-5) unstable; urgency=medium
+
+ [ Andreas Tille ]
+ * Adapt to numpy 1.24 (Closes: #1029245)
+
+ [ Nilesh Patra ]
+ * Actually and genuinely add a patch for numpydoc errors
+
+ -- Nilesh Patra <nilesh@debian.org> Sat, 21 Jan 2023 10:28:38 +0530
+
+nitime (0.9-4) unstable; urgency=medium
+
+ * Add patch to fix test_psd_matlab (Closes: #1027550)
+ * docs/conf.py: Look in axes_grid1 instead of axes_grid
+ * Bump Standards-Version to 4.6.2 (no changes needed)
+
+ -- Nilesh Patra <nilesh@debian.org> Mon, 02 Jan 2023 00:14:51 +0530
+
+nitime (0.9-3) unstable; urgency=medium
+
+ * Add patch to not set ws lower bound to zero (Closes: #1013569)
+
+ -- Nilesh Patra <nilesh@debian.org> Mon, 11 Jul 2022 18:30:08 +0530
+
+nitime (0.9-2) unstable; urgency=medium
+
+ * Switch to pytest instead of nose
+
+ -- Nilesh Patra <nilesh@debian.org> Mon, 25 Oct 2021 02:30:56 +0530
+
+nitime (0.9-1.1) unstable; urgency=medium
+
+ * Non-maintainer upload.
+ * Fix watchfile to detect new versions on github (routine-update)
+ * Standards-Version: 4.6.0 (routine-update)
+ * Apply multi-arch hints.
+ + python-nitime-doc: Add Multi-Arch: foreign.
+
+ -- Andreas Tille <tille@debian.org> Wed, 01 Sep 2021 16:43:09 +0200
+
+nitime (0.9-1) unstable; urgency=medium
+
+ * Add myself to uploaders
+ * New upstream version 0.9
+ * Remove merged patch
+ * Standards-Version: 4.5.1 (routine-update)
+ * Switch watch version to 4
+ * Do not mention removed file in copyright
+
+ -- Nilesh Patra <npatra974@gmail.com> Sun, 20 Dec 2020 20:20:57 +0530
+
+nitime (0.8.1-4) unstable; urgency=medium
+
+ * Team Upload.
+ * Fix according changed sphinx API (Closes: #963663)
+ * Remove ajax inclusion in html files
+ * Fix lintian
+ * debhelper-compat 13 (routine-update)
+ * Update upstream/metadata
+
+ -- Nilesh Patra <npatra974@gmail.com> Sat, 22 Aug 2020 02:34:08 +0530
+
+nitime (0.8.1-3) unstable; urgency=medium
+
+ * Team upload.
+ * Ignore github extension since it is not compatible with Shinx 2.4
+ Closes: #955103
+ * Standards-Version: 4.5.0 (routine-update)
+ * debhelper-compat 12 (routine-update)
+ * Add salsa-ci file (routine-update)
+ * Rules-Requires-Root: no (routine-update)
+
+ -- Andreas Tille <tille@debian.org> Thu, 16 Apr 2020 11:45:12 +0200
+
+nitime (0.8.1-2) unstable; urgency=medium
+
+ * Team upload.
+ * Reupload source package to enable testing migration
+ * Set upstream metadata fields: Bug-Submit.
+
+ -- Andreas Tille <tille@debian.org> Thu, 19 Dec 2019 08:53:30 +0100
+
+nitime (0.8.1-1) unstable; urgency=medium
+
+ [ Michael R. Crusoe ]
+ * Team upload.
+ * New upstream version, python3 only. (Closes: #937145)
+ * Move package to salsa.debian.org/med-team
+
+ [ Andreas Tille ]
+ * Priotity optional
+ * debhelper-compat 9 (leave level 9 for backwards compatibility)
+ * Standards-Version: 4.4.1
+ * Testsuite: autopkgtest-pkg-python
+ * Secure URI in copyright format
+ * Respect DEB_BUILD_OPTIONS in override_dh_auto_test target
+ * Remove trailing whitespace in debian/changelog
+ * Use secure URI in Homepage field.
+ * Set upstream metadata fields: Bug-Database.
+ * Remove unused paragraph from d/copryight
+
+ -- Michael R. Crusoe <michael.crusoe@gmail.com> Sun, 15 Dec 2019 15:32:08 +0100
+
+nitime (0.7-2) unstable; urgency=high
+
+ * Use mathjax sphinx.ext if no pngmath is available (Closes: #922256)
+ * debian/control
+ - boost policy to 4.3.0
+ - remove obsolete X-Python*
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 21 Feb 2019 12:50:06 -0500
+
+nitime (0.7-1) unstable; urgency=medium
+
+ * New upstream release
+ * debian/control
+ - added python-{setuptools,pytest} into build-depends
+ * debian/rules
+ - skip slightly failing test_coherence_linear_dependence for now
+ (see https://github.com/nipy/nitime/issues/150)
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 06 Jan 2017 15:18:05 -0500
+
+nitime (0.6+git15-g4951606-1) unstable; urgency=medium
+
+ * New upstream snapshot from rel/0.6-15-g4951606
+ - contains fixes for compatibility with recent matplotlib etc
+ * debian/control
+ - boosted policy to 3.9.8
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 03 Aug 2016 22:42:06 -0400
+
+nitime (0.6-1) unstable; urgency=medium
+
+ * Fresh upstream bugfix release (Closes: #812700)
+ * debian/patches -- dropped 2 patches previously picked up from upstream VCS
+ * debian/watch -- adjusted for deprecated githubredir
+
+ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 07 Feb 2016 18:57:21 -0500
+
+nitime (0.5-3) unstable; urgency=medium
+
+ * Include upstream patch for modern matplotlib versions (Closes: #802524).
+
+ -- Michael Hanke <mih@debian.org> Wed, 30 Dec 2015 10:18:40 +0100
+
+nitime (0.5-2) unstable; urgency=medium
* Update maintainer email address.
* Remove dependency on python-support, while upgrading to dh9
@@ -66,7 +252,7 @@ nitime (0.3.1-1) unstable; urgency=low
nitime (0.3-1) UNRELEASED; urgency=low
* Fresh upstream release
- * Adjusted debian/watch and added a rudimentary get-orig-source which
+ * Adjusted debian/watch and added a rudimentary get-orig-source which
uses uscan to fetch tarballs from github
-- Yaroslav Halchenko <debian@onerussian.com> Mon, 15 Aug 2011 16:29:48 -0400
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index ec63514..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/debian/control b/debian/control
index 8a372ee..dd45b42 100644
--- a/debian/control
+++ b/debian/control
@@ -1,38 +1,43 @@
Source: nitime
-Maintainer: NeuroDebian Team <team@neuro.debian.net>
+Maintainer: Debian Med Packaging Team <debian-med-packaging@lists.alioth.debian.org>
Uploaders: Yaroslav Halchenko <debian@onerussian.com>,
- Michael Hanke <mih@debian.org>
+ Michael Hanke <mih@debian.org>,
+ Nilesh Patra <nilesh@debian.org>,
+ Étienne Mollier <emollier@debian.org>
Section: python
-Priority: extra
-Build-Depends: debhelper (>= 9~),
- python-all,
- dh-python,
- python-numpy,
- python-scipy,
- python-matplotlib,
- python-tk,
- python-sphinx,
- python-nose,
- python-networkx,
- python-nibabel,
+Testsuite: autopkgtest-pkg-python
+Priority: optional
+Build-Depends: debhelper-compat (= 13),
+ python3-all,
+ dh-sequence-python3,
+ pybuild-plugin-pyproject,
+ python3-numpy,
+ python3-scipy,
+ python3-matplotlib,
+ python3-tk,
+ python3-sphinx,
+ python3-networkx,
+ python3-nibabel,
+ python3-setuptools,
+ python3-setuptools-scm,
+ python3-pytest,
graphviz
-Standards-Version: 3.9.6
-Vcs-Browser: http://github.com/yarikoptic/nitime
-Vcs-Git: git://github.com/yarikoptic/nitime.git
-Homepage: http://nipy.org/nitime
-X-Python-Version: >= 2.6
+Standards-Version: 4.6.2
+Vcs-Browser: https://salsa.debian.org/med-team/nitime
+Vcs-Git: https://salsa.debian.org/med-team/nitime.git
+Homepage: https://nipy.org/nitime
+Rules-Requires-Root: no
-Package: python-nitime
+Package: python3-nitime
Architecture: all
-Depends: ${python:Depends},
+Depends: ${python3:Depends},
${shlibs:Depends},
${misc:Depends},
- python-numpy,
- python-scipy
-Recommends: python-matplotlib,
- python-nose,
- python-nibabel,
- python-networkx
+ python3-numpy,
+ python3-scipy
+Recommends: python3-matplotlib,
+ python3-nibabel,
+ python3-networkx
Description: timeseries analysis for neuroscience data (nitime)
Nitime is a Python module for time-series analysis of data from
neuroscience experiments. It contains a core of numerical algorithms
@@ -48,7 +53,8 @@ Section: doc
Depends: ${misc:Depends},
libjs-jquery,
libjs-underscore
-Suggests: python-nitime
+Suggests: python3-nitime
+Multi-Arch: foreign
Description: timeseries analysis for neuroscience data (nitime) -- documentation
Nitime is a Python module for time-series analysis of data from
neuroscience experiments.
diff --git a/debian/copyright b/debian/copyright
index 824d75a..b6892bc 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -1,4 +1,4 @@
-Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: nitime
Upstream-Contact: nipy-devel@neuroimaging.scipy.org
Source: http://github.com/nipy/nitime
@@ -12,19 +12,11 @@ Files: doc/sphinxext/*
Copyright: 2007-2009, Stefan van der Walt and Sphinx team
License: BSD-3
-Files: nitime/tests/decotest.py
-Copyright: 2009, The IPython Development Team
-License: BSD-3
-
Files: nitime/_mpl_units.py
Copyright: 2012-2013, Matplotlib Development Team
License: PSF-Matplotlib-license
Comment: for compatibility with older matplotlib versions
-Files: nitime/six.py
-Copyright: 2010-2013, Benjamin Peterson
-License: Expat
-
Files: debian/*
Copyright: 2010-2014, Yaroslav Halchenko <debian@onerussian.com>
License: BSD-3
@@ -104,21 +96,3 @@ License: PSF-Matplotlib-license
Licensee agrees to be bound by the terms and conditions of this License
Agreement.
-License: Expat
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
- .
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
- .
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
diff --git a/debian/gbp.conf b/debian/gbp.conf
deleted file mode 100644
index 0e3ce00..0000000
--- a/debian/gbp.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-[DEFAULT]
-upstream-branch = master
-debian-branch = debian
-upstream-tag = %(version)s
-debian-tag = debian/%(version)s
-
-# Options only affecting git-buildpackage
-[git-buildpackage]
-# ignore some any non-gitted files
-ignore-new = True
-#upstream-branch = dfsgclean
-# uncomment this to automatically GPG sign tags
-sign-tags = True
-# use this for more svn-buildpackage like bahaviour:
-export-dir = ../build-area/
-tarball-dir = ../tarballs/
-
-
diff --git a/debian/patches/deb_no_sources_for_docs b/debian/patches/deb_no_sources_for_docs
index d95e4a6..b33abaf 100644
--- a/debian/patches/deb_no_sources_for_docs
+++ b/debian/patches/deb_no_sources_for_docs
@@ -1,6 +1,8 @@
---- a/doc/conf.py
-+++ b/doc/conf.py
-@@ -131,6 +131,9 @@ today_fmt = '%B %d, %Y, %H:%M PDT'
+Author: Yaroslav Halchenko <debian@onerussian.com>
+Description: prevent inclusion of _sources which obscures offline search
+--- nitime.orig/doc/conf.py
++++ nitime/doc/conf.py
+@@ -120,6 +120,9 @@
# for source files.
exclude_trees = ['_build']
diff --git a/debian/patches/demote-cython.patch b/debian/patches/demote-cython.patch
new file mode 100644
index 0000000..2806bda
--- /dev/null
+++ b/debian/patches/demote-cython.patch
@@ -0,0 +1,20 @@
+Description: demote cython requirement.
+ The dependency on cython3 is needed at build time, but there is no reason for
+ it to be required for end users of nitime at runtime.
+
+Author: Étienne Mollier <emollier@debian.org>
+Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1057996
+Forwarded: no
+Last-Update: 2023-12-14
+---
+This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
+--- nitime.orig/pyproject.toml
++++ nitime/pyproject.toml
+@@ -2,7 +2,6 @@
+ requires = [
+ "setuptools",
+ "setuptools_scm[toml]>=6.2",
+- "cython",
+ # As of numpy 1.25, you can now build against older APIs.
+ # https://numpy.org/doc/stable/release/1.25.0-notes.html
+ "numpy>=1.25; python_version > '3.8'",
diff --git a/debian/patches/matplotlib.patch b/debian/patches/matplotlib.patch
new file mode 100644
index 0000000..7bdc586
--- /dev/null
+++ b/debian/patches/matplotlib.patch
@@ -0,0 +1,14 @@
+Description: Change make_axes_locatable's location with mpl's changed API
+Author: Nilesh Patra <nilesh@debian.org>
+Last-Update: 2023-08-24
+--- a/doc/conf.py
++++ b/doc/conf.py
+@@ -23,7 +23,7 @@
+ # is a string that should be a valid (possibly dotted) package name, and the
+ # second a list (possibly empty) of names to import from that package.
+ doc_deps = [['networkx', []],
+- ['mpl_toolkits.axes_grid', ['make_axes_locatable']],
++ ['mpl_toolkits.axes_grid1.axes_divider', ['make_axes_locatable']],
+ ]
+
+ # Analyze the dependencies, and fail if any is unmet, with a hopefully
diff --git a/debian/patches/numpydoc_1.24.patch b/debian/patches/numpydoc_1.24.patch
new file mode 100644
index 0000000..f81e5f8
--- /dev/null
+++ b/debian/patches/numpydoc_1.24.patch
@@ -0,0 +1,18 @@
+Description: Fix python3.11 related errors in doc spec
+Bug-Debian: https://bugs.debian.org/1029245
+Author: Nilesh Patra <nilesh@debian.org>
+Last-Update: Fri, 21 Jan 2023 10:27:25 +0530
+
+--- a/doc/sphinxext/docscrape.py
++++ b/doc/sphinxext/docscrape.py
+@@ -428,8 +428,8 @@
+ func, func_name = self.get_func()
+ try:
+ # try to read signature
+- argspec = inspect.getargspec(func)
+- argspec = inspect.formatargspec(*argspec)
++ argspec = inspect.signature(func)
++ argspec = str(argspec)
+ argspec = argspec.replace('*','\*')
+ signature = '%s%s' % (func_name, argspec)
+ except TypeError as e:
diff --git a/debian/patches/python3 b/debian/patches/python3
new file mode 100644
index 0000000..2042a79
--- /dev/null
+++ b/debian/patches/python3
@@ -0,0 +1,97 @@
+Author: Michael R. Crusoe <michael.crusoe@gmail.com>
+Last-Update: 2019-12-15 15:37:17
+Bug-Debian: https://bugs.debian.org/937145
+Description: Python3 fixes
+
+--- a/tools/build_modref_templates.py
++++ b/tools/build_modref_templates.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ """Script to auto-generate our API docs.
+ """
+ # stdlib imports
+--- a/tools/github_stats.py
++++ b/tools/github_stats.py
+@@ -1,11 +1,11 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ """Simple tools to query github.com and gather stats about issues.
+ """
+ #-----------------------------------------------------------------------------
+ # Imports
+ #-----------------------------------------------------------------------------
+
+-from __future__ import print_function
++
+
+ import json
+ import re
+--- a/tools/gitwash_dumper.py
++++ b/tools/gitwash_dumper.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ ''' Checkout gitwash repo into directory and do search replace on name '''
+
+ import os
+@@ -84,7 +84,7 @@ def copy_replace(replace_pairs,
+ for rep_glob in rep_globs:
+ fnames += fnmatch.filter(out_fnames, rep_glob)
+ if verbose:
+- print '\n'.join(fnames)
++ print('\n'.join(fnames))
+ for fname in fnames:
+ filename_search_replace(replace_pairs, fname, False)
+ for in_exp, out_exp in renames:
+--- a/tools/build_release
++++ b/tools/build_release
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ """Nitime release build script.
+ """
+ from toollib import *
+--- a/tools/ex2rst
++++ b/tools/ex2rst
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ #
+ # Note: this file is copied (possibly with minor modifications) from the
+ # sources of the PyMVPA project - http://pymvpa.org. It remains licensed as
+--- a/tools/make_examples.py
++++ b/tools/make_examples.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ """Run the py->rst conversion and run all examples.
+
+ This also creates the index.rst file appropriately, makes figures, etc.
+--- a/tools/release
++++ b/tools/release
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ """Nitime release script.
+
+ This should only be run at real release time.
+--- a/setup.py
++++ b/setup.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ """Setup file for the Python nitime package.
+
+ This file only contains cython components.
+--- a/doc/Makefile
++++ b/doc/Makefile
+@@ -37,7 +37,7 @@ htmlonly:
+ @echo "Build finished. The HTML pages are in _build/html."
+
+ api:
+- python ../tools/build_modref_templates.py
++ python3 ../tools/build_modref_templates.py
+ @echo "Build API docs finished."
+
+ html: rstexamples api htmlonly
diff --git a/debian/patches/series b/debian/patches/series
index 2ce853c..7a02c7d 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,2 +1,7 @@
deb_no_sources_for_docs
-up_version_info_python2.6
+python3
+sphinx_ignore_github.patch
+numpydoc_1.24.patch
+matplotlib.patch
+sphinx-conf-fix.patch
+demote-cython.patch
diff --git a/debian/patches/sphinx-conf-fix.patch b/debian/patches/sphinx-conf-fix.patch
new file mode 100644
index 0000000..d2a6fa0
--- /dev/null
+++ b/debian/patches/sphinx-conf-fix.patch
@@ -0,0 +1,28 @@
+Description: fix sphinx conf.py file.
+ Since introduction of nitime/_version.py through the move to pyproject.toml,
+ the sphinx documentation configuration script does not seem to have followed.
+ Use of the former variables are causing a variety of errors at documentation
+ build time.
+
+Author: Étienne Mollier <emollier@debian.org>
+Forwarded: https://github.com/nipy/nitime/pull/212
+Last-Update: 2023-12-14
+---
+This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
+--- nitime.orig/doc/conf.py
++++ nitime/doc/conf.py
+@@ -93,12 +93,12 @@
+ # We read the version info from the source file.
+ ver = {}
+
+-ver_file = os.path.join('..', 'nitime', 'version.py')
++ver_file = os.path.join('..', 'nitime', '_version.py')
+ with open(ver_file) as f:
+ exec(f.read())
+
+ # The short X.Y version.
+-version = '%s.%s' % (_version_major, _version_minor)
++version = '%s.%s' % (version_tuple[0], version_tuple[1])
+ # The full version, including alpha/beta/rc tags.
+ release = __version__
+
diff --git a/debian/patches/sphinx_ignore_github.patch b/debian/patches/sphinx_ignore_github.patch
new file mode 100644
index 0000000..fbf5da0
--- /dev/null
+++ b/debian/patches/sphinx_ignore_github.patch
@@ -0,0 +1,15 @@
+Description: Ignore github extension since it is not compatible with Shinx 2.4
+Bug-Debian: https://bugs.debian.org/955103
+Author: Andreas Tille <tille@debian.org>
+Last-Update: Thu, 16 Apr 2020 11:34:52 +0200
+
+--- a/doc/conf.py
++++ b/doc/conf.py
+@@ -65,7 +65,6 @@ extensions = ['sphinx.ext.autodoc',
+ 'only_directives',
+ 'math_dollar',
+ 'matplotlib.sphinxext.plot_directive',
+- 'github'
+ ]
+
+ # ghissue config
diff --git a/debian/patches/up_version_info_python2.6 b/debian/patches/up_version_info_python2.6
deleted file mode 100644
index 7cf84f7..0000000
--- a/debian/patches/up_version_info_python2.6
+++ /dev/null
@@ -1,22 +0,0 @@
-diff --git a/nitime/tests/test_lazy.py b/nitime/tests/test_lazy.py
-index 70a9cda..bbb874f 100644
---- a/nitime/tests/test_lazy.py
-+++ b/nitime/tests/test_lazy.py
-@@ -28,11 +28,13 @@ def test_lazy_noreload():
- mod = l.LazyImport('sys')
- # accessing module dictionary will trigger an import
- len(mod.__dict__)
-- if sys.version_info.major == 2:
-+ # do not use named tuple feature for Python 2.6 compatibility
-+ major, minor = sys.version_info[:2]
-+ if major == 2:
- npt.assert_raises(ImportError, reload, mod)
-- elif sys.version_info.major == 3:
-+ elif major == 3:
- import imp
-- if sys.version_info.minor == 2:
-+ if minor == 2:
- npt.assert_raises(ImportError, imp.reload, mod)
-- elif sys.version_info.minor == 3:
-+ elif minor == 3:
- npt.assert_raises(TypeError, imp.reload, mod)
diff --git a/debian/python-nitime-doc.lintian-overrides b/debian/python-nitime-doc.lintian-overrides
new file mode 100644
index 0000000..ebdb9f6
--- /dev/null
+++ b/debian/python-nitime-doc.lintian-overrides
@@ -0,0 +1,2 @@
+# False Positives
+python-nitime-doc: embedded-javascript-library please use sphinx [usr/share/doc/python-nitime-doc/html/_static/*.js]
diff --git a/debian/python-nitime.install b/debian/python3-nitime.install
index 326a444..326a444 100644
--- a/debian/python-nitime.install
+++ b/debian/python3-nitime.install
diff --git a/debian/rules b/debian/rules
index b7b5a0f..66026e9 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,50 +1,38 @@
#!/usr/bin/make -f
# -*- makefile -*-
-PACKAGE_NAME = python-nitime
+PACKAGE_NAME = python3-nitime
PACKAGE_ROOT_DIR = debian/${PACKAGE_NAME}
INSTALL_PATH = $(CURDIR)/debian/tmp
# default Python
-PYTHON=$(shell pyversions -d)
+PYTHON=$(shell py3versions -d)
# override matplotlib config directory
export MPLCONFIGDIR=$(CURDIR)/build
export HOME=$(CURDIR)/build
%:
- dh $@ --with python2
-
-override_dh_auto_test:
- : # Do not test just after build, lets install and then test
+ dh $@ --buildsystem pybuild
override_dh_auto_install:
dh_auto_install
mkdir -p $(MPLCONFIGDIR) # just in case
- echo "backend : Agg" >| $(MPLCONFIGDIR)/matplotlibrc
: # Prune duplicate LICENSE file
find debian/ -name LICENSE -delete
: # Only now lets build docs
ifeq (,$(filter nodoc,$(DEB_BUILD_OPTIONS)))
export PYTHONPATH=$$(/bin/ls -d $(INSTALL_PATH)/usr/lib/$(PYTHON)/*-packages); \
- $(MAKE) -C doc html
+ $(MAKE) -C doc html-no-exec
+ # TODO: remove -no-exec when doc/examples/filtering_fmri.py is fixed
-rm doc/_build/html/_static/jquery.js
-rm doc/_build/html/_static/underscore.js
-rm -r doc/_build/html/_sources
: # objects inventory is of no use for the package
-rm doc/_build/html/objects.inv
-endif
-
-# All tests later on
-# cd build to prevent use of local/not-built source tree
-ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
- cd build; \
- for PYTHON in $(shell pyversions -r); do \
- echo "I: Running NiTime unittests using $$PYTHON"; \
- PYTHONPATH=$$(/bin/ls -d $(INSTALL_PATH)/usr/lib/$$PYTHON/*-packages) \
- MPLCONFIGDIR=/tmp/ \
- $$PYTHON /usr/bin/nosetests --exclude=test_lazy_reload nitime; \
+ for f in `find . -name "*.html"`; do \
+ sed -i -e '/cloudflare/d' -e '/jsdelivr/d' $$f; \
done
endif
@@ -56,7 +44,14 @@ override_dh_compress:
override_dh_clean:
dh_clean
@echo "I: Removing other generated material"
- rm -rf build doc/_build doc/examples/fig doc/api/generated/ doc/examples/*rst
+ rm -rf \
+ build \
+ doc/_build \
+ doc/examples/fig \
+ doc/api/generated/ \
+ doc/examples/*rst \
+ nitime/_utils.c \
+ nitime/_version.py
get-orig-source:
-uscan --upstream-version 0 --rename
diff --git a/debian/salsa-ci.yml b/debian/salsa-ci.yml
new file mode 100644
index 0000000..33c3a64
--- /dev/null
+++ b/debian/salsa-ci.yml
@@ -0,0 +1,4 @@
+---
+include:
+ - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml
+ - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml
diff --git a/debian/upstream/metadata b/debian/upstream/metadata
new file mode 100644
index 0000000..8b7ec28
--- /dev/null
+++ b/debian/upstream/metadata
@@ -0,0 +1,7 @@
+---
+Archive: GitHub
+Bug-Database: https://github.com/nipy/nitime/issues
+Bug-Submit: https://github.com/nipy/nitime/issues/new
+Changelog: https://github.com/nipy/nitime/tags
+Repository: https://github.com/nipy/nitime.git
+Repository-Browse: https://github.com/nipy/nitime
diff --git a/debian/watch b/debian/watch
index f9099e3..aef17e9 100644
--- a/debian/watch
+++ b/debian/watch
@@ -1,4 +1,4 @@
-version=3
+version=4
-opts="uversionmangle=s/rc/~rc/g" \
- http://githubredir.debian.net/github/nipy/nitime/ rel/(.+).tar.gz
+opts="uversionmangle=s/rc/~rc/,filenamemangle=s/.*?([\d\.]+)\..*/nitime_$1.orig.tar.gz/" \
+ https://github.com/nipy/nitime/tags (?:.*?/)?v?@ANY_VERSION@@ARCHIVE_EXT@
diff --git a/doc/Makefile b/doc/Makefile
index 1e339e4..6396445 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -23,12 +23,11 @@ help:
"items (ChangeLog)"
@echo " linkcheck check all external links for integrity"
@echo " doctest run all doctests embedded in the documentation"
- @echo " sf_fer_perez copy html files to sourceforge (fer_perez only)"
- @echo " sf_arokem copy html files to sourceforge (arokem only)"
+ @echo " upload Upload the docs to github pages website"
clean:
-rm -rf _build/* *~ api/generated
- -rm -rf build/*
+ -rm -rf build/*
-rm examples/fig/*
-rm examples/*.rst
@@ -37,14 +36,14 @@ htmlonly:
@echo
@echo "Build finished. The HTML pages are in _build/html."
-api:
- python ../tools/build_modref_templates.py
+api:
+ python3 ../tools/build_modref_templates.py
@echo "Build API docs finished."
-html: rstexamples api htmlonly
+html: rstexamples api htmlonly
@echo "Build HTML and API finished."
-html-no-exec: rstexamples-no-exec api htmlonly
+html-no-exec: rstexamples-no-exec api htmlonly
@echo "Build HTML and API finished."
latex: api
@@ -81,31 +80,6 @@ rstexamples:
rstexamples-no-exec:
../tools/make_examples.py --no-exec
-# Sourceforge doesn't appear to have a way of copying the files
-# without specifying a username. So we'll probably have one target
-# for each project admin
-#
-# Note: If several developers alternate commits, we may get permission errors
-# from rsync with the upload targets. If that happens, use the *_full targets
-# below which first wipe out the whole dir on SF and then re-upload.
-
-sf_fer_perez:
- @echo "Copying html files to sourceforge..."
- rsync -avH --delete -e ssh _build/html/ fer_perez,nipy@web.sourceforge.net:htdocs/nitime
-
-sf_arokem:
- @echo "Copying html files to sourceforge..."
- rsync -avH --delete -e ssh _build/html/ arokem,nipy@web.sourceforge.net:htdocs/nitime
-
-# Targets that force a clean and re-upload
-sf_fer_perez_clean:
- @echo "Cleaning up sourceforge site"
- ssh fer_perez,nipy@shell.sf.net "rm -rf /home/groups/n/ni/nipy/htdocs/nitime/*"
-
-sf_fer_perez_full: sf_fer_perez_clean sf_fer_perez
-
-sf_arokem_clean:
- @echo "Cleaning up sourceforge site"
- ssh arokem,nipy@shell.sf.net "rm -rf /home/groups/n/ni/nipy/htdocs/nitime/*"
-sf_arokem_full: sf_arokem_clean sf_arokem
+upload: html
+ ./upload-gh-pages.sh _build/html/ nitime nipy
diff --git a/doc/conf.py b/doc/conf.py
index ae01b40..8aabc40 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -23,7 +23,7 @@ import warnings
# is a string that should be a valid (possibly dotted) package name, and the
# second a list (possibly empty) of names to import from that package.
doc_deps = [['networkx', []],
- ['mpl_toolkits.axes_grid', ['make_axes_locatable']],
+ ['mpl_toolkits.axes_grid1.axes_divider', ['make_axes_locatable']],
]
# Analyze the dependencies, and fail if any is unmet, with a hopefully
@@ -37,53 +37,34 @@ for package, parts in doc_deps:
if failed_deps:
print
- print "*** ERROR IN DOCUMENTATION BUILD ***"
- print "The documentation build is missing these dependencies:"
+ print("*** ERROR IN DOCUMENTATION BUILD ***")
+ print("The documentation build is missing these dependencies:")
for pak, parts in failed_deps:
if parts:
- print "Package: %s, parts: %s" % (pak, parts)
+ print("Package: %s, parts: %s" % (pak, parts))
else:
- print "Package: %s" % pak
+ print("Package: %s" % pak)
raise RuntimeError('Unmet dependencies for documentation build')
-
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
-#-----------------------------------------------------------------------------
-# Error control in examples and plots
-#-----------------------------------------------------------------------------
-# We want by default our documentation to NOT build if any plot warnings are
-# generated, so we turn PlotWarning into an error. For now this requires using
-# a patched version of the plot_directive, but we'll upstream this to matplotlib.
-import plot_directive
-# If you *really* want to disable these error checks to be able to finish a doc
-# build, comment out the next line. But please do NOT leave it uncommented in
-# a committed file, so that the official build is always in the paranoid mode
-# (where the warnings become errors).
-warnings.simplefilter('error', plot_directive.PlotWarning)
-
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
- #'sphinx.ext.intersphinx',
+ 'sphinx.ext.intersphinx',
'sphinx.ext.todo',
- 'sphinx.ext.pngmath',
'numpydoc',
'sphinx.ext.inheritance_diagram',
'ipython_console_highlighting',
'only_directives',
- 'math_dollar', # Support for $x$ math
- # For now, we use our own patched plot directive, we'll revert
- # back to the official one once our changes are upstream.
- #'matplotlib.sphinxext.plot_directive',
- 'plot_directive',
- 'github'
+ 'math_dollar',
+ 'matplotlib.sphinxext.plot_directive',
]
# ghissue config
@@ -111,11 +92,15 @@ copyright = u'2009, Neuroimaging in Python team'
#
# We read the version info from the source file.
ver = {}
-execfile('../nitime/version.py', ver)
+
+ver_file = os.path.join('..', 'nitime', '_version.py')
+with open(ver_file) as f:
+ exec(f.read())
+
# The short X.Y version.
-version = '%s.%s' % (ver['_version_major'], ver['_version_minor'])
+version = '%s.%s' % (version_tuple[0], version_tuple[1])
# The full version, including alpha/beta/rc tags.
-release = ver['__version__']
+release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/discussion/base_classes.rst b/doc/discussion/base_classes.rst
index 591f9b7..1a23cb5 100644
--- a/doc/discussion/base_classes.rst
+++ b/doc/discussion/base_classes.rst
@@ -23,7 +23,7 @@ representation of financial time-series, which can be compared against each
other, using the common reference and for which the concept of the work-week
applies.
-However, because most often the absolute calender time of the occurence of
+However, because most often the absolute calendar time of the occurrence of
events in an experiment is of no importance, we can disregard it. Rather, the
comparison of the time progression of data in different experiments conducted
in different calendar times (different days, different times in the same day)
@@ -71,9 +71,9 @@ the essential differences between them.
-------------------
This class has the least restrictions on it: it will be a 1d array, which
-contains time-points that are not neccesarily ordered. It can also contain
+contains time-points that are not necessarily ordered. It can also contain
several copies of the same time-point. This class will be used in order to
-represent sparsely occuring events, measured at some unspecified sampling rate
+represent sparsely occurring events, measured at some unspecified sampling rate
and possibly collected from several different channels, where the data is
sampled in order of channel and not in order of time. As in the case of the
:class:`np.ndarray`, slicing into this kind of representation should allow a
diff --git a/doc/discussion/interval_object.rst b/doc/discussion/interval_object.rst
index 35461ca..d19faa2 100644
--- a/doc/discussion/interval_object.rst
+++ b/doc/discussion/interval_object.rst
@@ -9,7 +9,7 @@ time-series. For example, a typical case, is when two time-series are recorded
simultaneously. One is recorded from measurement of some continuous
physilogical variable, such as fMRI BOLD (and is represented by a an object of
type :ref:`UniformTimeSeries`). The other is a series of discrete events
-occuring concurrently (and can be represented by a :ref:`EventSeries` or by a
+occurring concurrently (and can be represented by a :ref:`EventSeries` or by a
:ref:`NonUniformTimeSeries`). For example, button presses by the subject, or
trials of different kinds. If we want to analyze the progression of the
physiological time-series, locked to particular kinds of events in the
diff --git a/doc/discussion/time_series_access.rst b/doc/discussion/time_series_access.rst
index a1df225..3a2aa26 100644
--- a/doc/discussion/time_series_access.rst
+++ b/doc/discussion/time_series_access.rst
@@ -52,7 +52,7 @@ for which $t_i$ is the maximal one, which still fulfills: $t_i<t$.
Questions
~~~~~~~~~
-The follwing questions apply to all three cases:
+The following questions apply to all three cases:
* what happens when the t is smaller than the smallest entry in the array
return None?
diff --git a/doc/examples/.gitignore b/doc/examples/.gitignore
new file mode 100644
index 0000000..30d8556
--- /dev/null
+++ b/doc/examples/.gitignore
@@ -0,0 +1 @@
+*.rst
diff --git a/doc/examples/ar_est_1var.py b/doc/examples/ar_est_1var.py
index 29ce8cd..29ce8cd 100755..100644
--- a/doc/examples/ar_est_1var.py
+++ b/doc/examples/ar_est_1var.py
diff --git a/doc/examples/ar_est_2vars.py b/doc/examples/ar_est_2vars.py
index 334bcac..98ee597 100644
--- a/doc/examples/ar_est_2vars.py
+++ b/doc/examples/ar_est_2vars.py
@@ -4,7 +4,7 @@
.. _mar:
=====================================
-Mulitvariate auto-regressive modeling
+Multivariate auto-regressive modeling
=====================================
Multivariate auto-regressive modeling uses a simple
@@ -133,7 +133,6 @@ Sw_true = alg.spectral_matrix_xy(Hw, cov)
Next, we will generate 500 example sets of 100 points of these processes, to analyze:
-
"""
#Number of realizations of the process
@@ -149,7 +148,7 @@ n_process = am.shape[-1]
z = np.empty((N, n_process, L))
nz = np.empty((N, n_process, L))
-for i in xrange(N):
+for i in range(N):
z[i], nz[i] = utils.generate_mar(am, cov, L)
"""
@@ -187,7 +186,7 @@ order to choose an appropriate order, given the data.
Rxx = np.empty((N, n_process, n_process, n_lags))
-for i in xrange(N):
+for i in range(N):
Rxx[i] = utils.autocov_vector(z[i], nlags=n_lags)
Rxx = Rxx.mean(axis=0)
@@ -242,7 +241,7 @@ Syy_true = Sw_true[1, 1].real
"""
-The other is an estimate based on a multi-taper spectral estimate from the
+The other is an estimate based on a multitaper spectral estimate from the
empirical signals:
"""
@@ -250,7 +249,7 @@ empirical signals:
c_x = np.empty((L, w.shape[0]))
c_y = np.empty((L, w.shape[0]))
-for i in xrange(N):
+for i in range(N):
frex, c_x[i], nu = alg.multi_taper_psd(z[i][0])
frex, c_y[i], nu = alg.multi_taper_psd(z[i][1])
@@ -273,9 +272,9 @@ ax01.legend()
.. image:: fig/ar_est_2vars_01.png
-Next, we plot the granger causalities. There are three GCs. One for each
+Next, we plot the Granger causalities. There are three GCs. One for each
direction of causality between the two processes (X => Y and Y => X). In
-addition, there is the instanteaneous causality between the processes:
+addition, there is the instantaneous causality between the processes:
"""
@@ -320,7 +319,7 @@ ax03.plot(w, f_id, label='Interdependence')
coh = np.empty((N, 33))
-for i in xrange(N):
+for i in range(N):
frex, this_coh = alg.coherence(z[i])
coh[i] = this_coh[0, 1]
@@ -341,7 +340,6 @@ plt.show()
"""
-
.. [Ding2006] M. Ding, Y. Chen and S.L. Bressler (2006) Granger causality:
basic theory and application to neuroscience. In Handbook of Time Series
Analysis, ed. B. Schelter, M. Winterhalder, and J. Timmer, Wiley-VCH
@@ -351,5 +349,4 @@ plt.show()
Characterization by Partial Autocorrelation Matrices. The Annals of Statistics,
6: 643-648
-
"""
diff --git a/doc/examples/ar_est_3vars.py b/doc/examples/ar_est_3vars.py
index 7e21504..b69db25 100644
--- a/doc/examples/ar_est_3vars.py
+++ b/doc/examples/ar_est_3vars.py
@@ -3,7 +3,7 @@
.. _mar3:
=====================================================
- Mulitvariate auto-regressive modeling - 3 variables
+ Multivariate auto-regressive modeling - 3 variables
=====================================================
This example is an extension of the example presented here: :ref:`mar`. Here,
@@ -33,7 +33,7 @@ np.random.seed(1981)
"""
-simulate two multivariate autoregressive systems.
+Simulate two multivariate autoregressive systems.
The first is defined by the following equations:
@@ -132,7 +132,7 @@ za = np.empty((N, 3, L))
zb = np.empty((N, 3, L))
ea = np.empty((N, 3, L))
eb = np.empty((N, 3, L))
-for i in xrange(N):
+for i in range(N):
za[i], ea[i] = utils.generate_mar(a, cov, L)
zb[i], eb[i] = utils.generate_mar(b, cov, L)
@@ -146,7 +146,7 @@ of auto-covariance at lags k=0,1,2
Raxx = np.empty((N, 3, 3, 3))
Rbxx = np.empty((N, 3, 3, 3))
-for i in xrange(N):
+for i in range(N):
Raxx[i] = utils.autocov_vector(za[i], nlags=3)
Rbxx[i] = utils.autocov_vector(zb[i], nlags=3)
diff --git a/doc/examples/ar_model_fit.py b/doc/examples/ar_model_fit.py
index 527dcb7..44eae34 100644
--- a/doc/examples/ar_model_fit.py
+++ b/doc/examples/ar_model_fit.py
@@ -4,7 +4,7 @@
Fitting an MAR model: analyzer interface
========================================
-In this example, we will use the Analyzer interface to fit a multi-variate
+In this example, we will use the Analyzer interface to fit a multivariate
auto-regressive model with two time-series influencing each other.
We start by importing 3rd party modules:
@@ -56,7 +56,7 @@ cov = np.array([[x_var, xy_cov],
"""
-Number of realizations of the process
+Number of realizations of the process:
"""
@@ -79,7 +79,7 @@ z = np.empty((N, n_process, L))
nz = np.empty((N, n_process, L))
np.random.seed(1981)
-for i in xrange(N):
+for i in range(N):
z[i], nz[i] = utils.generate_mar(am, cov, L)
@@ -90,7 +90,7 @@ We start by estimating the order of the model from the data:
"""
est_order = []
-for i in xrange(N):
+for i in range(N):
this_order, this_Rxx, this_coef, this_ecov = gc.fit_model(z[i][0], z[i][1])
est_order.append(this_order)
@@ -98,9 +98,9 @@ order = int(np.round(np.mean(est_order)))
"""
-Once we have estimated the order, we go ahead and fit each realization of the
+Once we have estimated the order, we go ahead and fit each realization of the
MAR model, constraining the model order accordingly (by setting the order
-key-word argument) to be always equal to the model order estimated above.
+keyword argument) to be always equal to the model order estimated above.
"""
@@ -108,7 +108,7 @@ Rxx = np.empty((N, n_process, n_process, n_lags))
coef = np.empty((N, n_process, n_process, order))
ecov = np.empty((N, n_process, n_process))
-for i in xrange(N):
+for i in range(N):
this_order, this_Rxx, this_coef, this_ecov = gc.fit_model(z[i][0], z[i][1], order=order)
Rxx[i] = this_Rxx
coef[i] = this_coef
@@ -117,7 +117,8 @@ for i in xrange(N):
"""
We generate a time-series from the recovered coefficients, using the same
-randomization seed as the first mar. These should look pretty similar to each other:
+randomization seed as the first mar. These should look pretty similar to each
+other:
"""
diff --git a/doc/examples/event_related_fmri.py b/doc/examples/event_related_fmri.py
index 1aaa597..44cb399 100644
--- a/doc/examples/event_related_fmri.py
+++ b/doc/examples/event_related_fmri.py
@@ -7,7 +7,7 @@ Event-related fMRI
==================
Extracting the average time-series from one signal, time-locked to the
-occurence of some type of event in another signal is a very typical operation
+occurrence of some type of event in another signal is a very typical operation
in the analysis of time-series from neuroscience experiments. Therefore, we
have an additional example of this kind of analysis in :ref:`grasshopper`
@@ -27,7 +27,7 @@ use in the analysis:
import os
-from matplotlib.mlab import csv2rec
+import numpy as np
import matplotlib.pyplot as plt
import nitime
@@ -40,14 +40,15 @@ len_et = 15 # This is given in number of samples, not time!
"""
-Next, we load the data into a recarray from the csv file, using csv2rec
+Next, we load the data into an array from the csv file, using ``np.loadtxt``
"""
data_path = os.path.join(nitime.__path__[0], 'data')
-data = csv2rec(os.path.join(data_path, 'event_related_fmri.csv'))
+fname = os.path.join(data_path, 'event_related_fmri.csv')
+data = np.genfromtxt(fname, dtype=float, delimiter=',', names=True)
"""
@@ -56,7 +57,7 @@ We initialize TimeSeries objects with the data and the TR:
One TimeSeries is initialized for the BOLD data:
"""
-t1 = ts.TimeSeries(data.bold, sampling_interval=TR)
+t1 = ts.TimeSeries(data['bold'], sampling_interval=TR)
"""
@@ -64,7 +65,7 @@ And another one for the events (the different stimuli):
"""
-t2 = ts.TimeSeries(data.events, sampling_interval=TR)
+t2 = ts.TimeSeries(data['events'], sampling_interval=TR)
"""
@@ -72,8 +73,8 @@ Note that this example uses the EventRelated analyzer (also used in the
:ref:`grasshopper` example), but here, instead of providing an :class:`Events`
object as input, another :class:`TimeSeries` object is provided, containing an
equivalent time-series with the same dimensions as the time-series on which the
-analysis is done, with '0' wherever no event of interest occured and an integer
-wherever an even of interest occured (sequential different integers for the
+analysis is done, with '0' wherever no event of interest occurred and an integer
+wherever an event of interest occurred (sequential different integers for the
different kinds of events).
"""
diff --git a/doc/examples/filtering_fmri.py b/doc/examples/filtering_fmri.py
index 39cd99b..5b1f308 100644
--- a/doc/examples/filtering_fmri.py
+++ b/doc/examples/filtering_fmri.py
@@ -28,7 +28,6 @@ import os
import numpy as np
import matplotlib.pyplot as plt
-from matplotlib.mlab import csv2rec
"""
@@ -60,7 +59,8 @@ TR = 1.89
data_path = os.path.join(nitime.__path__[0], 'data')
-data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+data_rec = np.genfromtxt(os.path.join(data_path, 'fmri_timeseries.csv'),
+                         names=True, delimiter=',')
# Extract ROI information from the csv file headers:
roi_names = np.array(data_rec.dtype.names)
diff --git a/doc/examples/granger_fmri.py b/doc/examples/granger_fmri.py
index 8c2b958..8df82a7 100644
--- a/doc/examples/granger_fmri.py
+++ b/doc/examples/granger_fmri.py
@@ -20,16 +20,16 @@ terms of a time-delayed auto-regressive model of the form:
x_t = \sum_{i=1}^{n}a_i x_{t-i} + \epsilon_t
-Here, a the past behaviour of a single time-series is used in order to predict
+Here, the past behaviour of a single time-series is used in order to predict
the current value of the time-series. In Granger 'causality' analysis, we test
whether the addition of a prediction of the time-series from another
-time-series through a multi-variate auto-regressive model may improve our
+time-series through a multivariate auto-regressive model may improve our
prediction of the present behavior of the time-series (reducing the value of
the error term $\epsilon_t$):
.. math::
- x_t = \sum_{i=1}^{n}a_i x_{t-i} + b_i y_{t-i} + \epsilon_t
+ x_t = \sum_{i=1}^{n}(a_i x_{t-i} + b_i y_{t-i}) + \epsilon_t
In our implementation of the algorithms used for this analysis, we follow
@@ -50,7 +50,6 @@ import os
import numpy as np
import matplotlib.pyplot as plt
-from matplotlib.mlab import csv2rec
import nitime
import nitime.analysis as nta
@@ -77,7 +76,9 @@ We read in the resting state fMRI data into a recarray from a csv file:
data_path = os.path.join(nitime.__path__[0], 'data')
-data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+fname = os.path.join(data_path, 'fmri_timeseries.csv')
+
+data_rec = np.genfromtxt(fname, dtype=float, delimiter=',', names=True)
roi_names = np.array(data_rec.dtype.names)
nseq = len(roi_names)
@@ -110,7 +111,7 @@ G = nta.GrangerAnalyzer(time_series, order=1)
"""
For comparison, we also initialize a CoherenceAnalyzer and a
-CorrelationAnalyzer, with the same TimeSeries object
+CorrelationAnalyzer, with the same TimeSeries object:
"""
@@ -175,7 +176,7 @@ misattribution of the direction and magnitude of dependence between time-series
in fMRI data (for a particularly extreme example of that see
[David2008]_). Therefore, as suggested by Roebroeck et al. [Roebroeck2005]_ and
[Kayser2009]_ we turn to examine the difference between $F_{x\rightarrow y}$ and
-$F_{y\rightarrow x}$
+$F_{y\rightarrow x}$.
"""
@@ -205,7 +206,7 @@ References
.. [Pearl2009] J. Pearl (2009). Causal inference in statistics: An
overview. Statistics surveys 3: 96-146.
-.. [Ding2008] M. Ding, Y. Chen, S.L. Bressler (2006) Granger causality:
+.. [Ding2006] M. Ding, Y. Chen, S.L. Bressler (2006) Granger causality:
basic theory and application to neuroscience. In Handbook of Time Series
Analysis, ed. B. Schelter, M. Winterhalder, and J. Timmer, Wiley-VCH
Verlage, 2006: 451-474
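The directionality comparison that the granger_fmri.py hunks describe boils
down to subtracting the two frequency-resolved Granger terms. A minimal sketch,
assuming the ``GrangerAnalyzer`` instance ``G`` created above exposes
``frequencies``, ``causality_xy`` and ``causality_yx`` arrays (attribute names
as in released nitime versions; the 0.02-0.15 Hz band is simply the one used
elsewhere in this example)::

    # Average each directed term over the band of interest, then subtract;
    # positive entries suggest the x -> y dependence exceeds y -> x.
    freq_idx_G = np.where((G.frequencies > 0.02) * (G.frequencies < 0.15))[0]
    g_xy = np.mean(G.causality_xy[:, :, freq_idx_G], -1)
    g_yx = np.mean(G.causality_yx[:, :, freq_idx_G], -1)
    g_diff = g_xy - g_yx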
diff --git a/doc/examples/grasshopper.py b/doc/examples/grasshopper.py
index 38b6fff..a930b3a 100644
--- a/doc/examples/grasshopper.py
+++ b/doc/examples/grasshopper.py
@@ -8,7 +8,7 @@
=====================================
Extracting the average time-series from one signal, time-locked to the
-occurence of some type of event in another signal is a very typical operation in
+occurrence of some type of event in another signal is a very typical operation in
the analysis of time-series from neuroscience experiments. Therefore, we have
an additional example of this kind of analysis in :ref:`et-fmri`
diff --git a/doc/examples/mtm_baseband_power.py b/doc/examples/mtm_baseband_power.py
index 4fd1525..bcaac25 100644
--- a/doc/examples/mtm_baseband_power.py
+++ b/doc/examples/mtm_baseband_power.py
@@ -12,7 +12,7 @@ interest in neuroimaging when finding the lowpass power envelope and the
instantaneous phase. The traditional technique uses the Hilbert
transform to find the analytic signal. However, this approach suffers
problems of bias and reliability, much like the periodogram suffers in
-PSD estimation. Once again, a multi-taper approach can provide an
+PSD estimation. Once again, a multitaper approach can provide an
estimate with lower variance.
The following demonstrates the use of spectra of multiple windows to
@@ -27,8 +27,10 @@ import nitime.utils as nt_ut
import matplotlib.pyplot as pp
"""
+
We'll set up a test signal with a red spectrum (integrated Gaussian
noise).
+
"""
N = 10000
@@ -37,20 +39,26 @@ NW = 40
W = float(NW)/N
"""
+
Create a nearly lowpass band-limited signal.
+
"""
s = np.cumsum( np.random.randn(N) )
"""
+
Strictly enforce the band-limited property in this signal.
+
"""
(b, a) = signal.butter(3, W, btype='lowpass')
slp = signal.lfilter(b, a, s)
"""
+
Modulate both signals away from baseband.
+
"""
s_mod = s * np.cos(2*np.pi*np.arange(N) * float(200) / N)
@@ -58,7 +66,9 @@ slp_mod = slp * np.cos(2*np.pi*np.arange(N) * float(200) / N)
fm = int( np.round(float(200) * nfft / N) )
"""
+
Create Slepians with the desired bandpass resolution (2W).
+
"""
(dpss, eigs) = nt_alg.dpss_windows(N, NW, 2*NW)
@@ -72,6 +82,7 @@ Test 1
We'll compare multitaper baseband power estimation with regular
Hilbert transform method under actual narrowband conditions.
+
"""
# MT method
@@ -140,12 +151,13 @@ pp.gcf().tight_layout()
Here we see that since the underlying signal is not truly narrowband,
the broadband bias is corrupting the Hilbert transform estimation of
-the complex demodulate. However the multi-taper estimate clearly
+the complex demodulate. However, the multitaper estimate clearly
remains lowpass.
"""
"""
+
Another property of computing the complex demodulate from the spectra
of multiple windows is that all bandpasses can be computed. In the
above examples, we were only taking a slice from the modulation
@@ -154,6 +166,7 @@ bandpasses at various frequencies. Note here, though, that our
bandwidth is set by the Slepian sequences we used for analysis. The
following plot shows a family of complex demodulates at frequencies
near the modulation frequency.
+
"""
### Show a family of baseband demodulations from the multitaper method
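For readers who want the Hilbert-transform baseline that mtm_baseband_power.py
argues against, here is a short sketch of the conventional analytic-signal
envelope (independent of the patch; ``s_mod`` and ``np`` are as defined in the
example, and ``scipy.signal.hilbert`` is the standard call)::

    from scipy.signal import hilbert

    analytic = hilbert(s_mod)                      # analytic signal
    hilbert_envelope = np.abs(analytic)            # instantaneous amplitude
    hilbert_phase = np.unwrap(np.angle(analytic))  # instantaneous phase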
diff --git a/doc/examples/mtm_harmonic_test.py b/doc/examples/mtm_harmonic_test.py
index cd5a42d..38cb3c8 100644
--- a/doc/examples/mtm_harmonic_test.py
+++ b/doc/examples/mtm_harmonic_test.py
@@ -29,9 +29,11 @@ import nitime.utils as nt_ut
import matplotlib.pyplot as pp
"""
+
We will set up a test signal with 3 harmonic components within
Gaussian noise. The line components must be sufficiently resolved
-given the multi-taper bandwidth of 2NW.
+given the multitaper bandwidth of 2NW.
+
"""
N = 10000
@@ -43,16 +45,20 @@ while np.any( np.diff(lines) < 2*NW ):
lines = lines.astype('d')
"""
+
The harmonic test should find *exact* frequencies if they were to fall
on the FFT grid. (Try commenting the following to see.) In the
scenario of real sampled data, increasing the number of FFT points can
help to locate the line components.
+
"""
lines += np.random.randn(3) # displace from grid locations
"""
+
Now proceed to specify the frequencies, phases, and amplitudes.
+
"""
lines /= 2.0**(fft_pow-2) # ensure they are well separated
@@ -61,11 +67,13 @@ phs = np.random.rand(3) * 2 * np.pi
amps = np.sqrt(2)/2 + np.abs( np.random.randn(3) )
"""
+
Set the RMS noise power here. Strategies to detect harmonics in low
SNR include improving the reliability of the spectral estimate
(increasing NW) and/or increasing the number of FFT points. Note that
the former option will limit the ability to resolve lines at nearby
frequencies.
+
"""
nz_sig = 1
@@ -77,7 +85,9 @@ nz = np.random.randn(N) * nz_sig
sig = harmonic + nz
"""
+
Take a look at our mock signal.
+
"""
pp.figure()
@@ -100,11 +110,13 @@ pp.gcf().tight_layout()
"""
"""
+
Here we'll use the :func:`utils.detect_lines` function with the given
Slepian properties (NW), and we'll ensure that we limit spectral bias
by choosing Slepians with concentration factors greater than 0.9. The
arrays returned include the detected line frequencies (f) and their
complex coefficients (b). The frequencies are normalized from :math:`(0,\frac{1}{2})`
+
"""
f, b = nt_ut.detect_lines(sig, (NW, 2*NW), low_bias=True, NFFT=2**fft_pow)
@@ -114,11 +126,11 @@ pp.figure()
pp.subplot(211)
pp.plot(harmonics.T, 'c', linewidth=3)
pp.plot(h_est.T, 'r--', linewidth=2)
-pp.title('%d lines detected'%h_est.shape[0])
+pp.title('%d lines detected' % h_est.shape[0])
pp.xlim(*(np.array([0.2, 0.3])*N).astype('i'))
pp.subplot(212)
err = harmonic - np.sum(h_est, axis=0)
-pp.plot( err**2 )
+pp.plot(err**2)
pp.title('Error signal')
pp.show()
@@ -138,7 +150,7 @@ phs_err = np.linalg.norm(phs_est - phs)**2
amp_err = np.linalg.norm(amps - 2*np.abs(b))**2 / np.linalg.norm(amps)**2
freq_err = np.linalg.norm(lines - f)**2
-print 'freqs:', lines, '\testimated:', f, '\terr: %1.3e'%freq_err
-print 'amp:', amps, '\testimated:', 2*np.abs(b), '\terr: %1.3e'%amp_err
-print 'phase:', phs, '\testimated:', phs_est, '\terr: %1.3e'%phs_err
-print 'MS error over noise: %1.3e'%(np.mean(err**2)/nz_sig**2,)
+print('freqs:', lines, '\testimated:', f, '\terr: %1.3e' % freq_err)
+print('amp:', amps, '\testimated:', 2*np.abs(b), '\terr: %1.3e' % amp_err)
+print('phase:', phs, '\testimated:', phs_est, '\terr: %1.3e' % phs_err)
+print('MS error over noise: %1.3e' % (np.mean(err**2)/nz_sig**2,))
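The ``h_est`` array plotted in mtm_harmonic_test.py is rebuilt from the
detected frequencies ``f`` and complex coefficients ``b``. A sketch of that
reconstruction, assuming the same amplitude convention as the ``2*np.abs(b)``
term in the error computations above::

    # Rebuild each detected line component on the original N-point time grid
    tx = np.arange(N)
    h_est = 2 * (b[:, None] * np.exp(2j * np.pi * tx * f[:, None])).real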
diff --git a/doc/examples/multi_taper_coh.py b/doc/examples/multi_taper_coh.py
index dabf8cf..c91085e 100755..100644
--- a/doc/examples/multi_taper_coh.py
+++ b/doc/examples/multi_taper_coh.py
@@ -3,19 +3,18 @@
.. _multi-taper-coh:
-================================
-Multi-taper coherence estimation
-================================
+===============================
+Multitaper coherence estimation
+===============================
Coherence estimation can be done using windowed-spectra. This is the method
-used in the example :ref:`resting-state`. In addition, multi-taper spectral
+used in the example :ref:`resting-state`. In addition, multitaper spectral
estimation can be used in order to calculate coherence and also confidence
intervals for the coherence values that result (see :ref:`multi-taper-psd`)
-
The data analyzed here is an fMRI data-set contributed by Beth Mormino. The
-data is taken from a single subject in a"resting-state" scan, in which subjects
+data is taken from a single subject in a "resting-state" scan, in which subjects
are fixating on a cross and maintaining alert wakefulness, but not performing
any other behavioral task.
@@ -29,7 +28,6 @@ import os
import numpy as np
import matplotlib.pyplot as plt
-from matplotlib.mlab import csv2rec
import scipy.stats.distributions as dist
from scipy import fftpack
@@ -53,7 +51,9 @@ We read in the data into a recarray from a csv file:
data_path = os.path.join(nitime.__path__[0], 'data')
-data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+fname = os.path.join(data_path, 'fmri_timeseries.csv')
+
+data_rec = np.genfromtxt(fname, dtype=float, delimiter=',', names=True)
"""
@@ -99,7 +99,7 @@ tapers, eigs = alg.dpss_windows(n_samples, NW, K)
"""
-We multiply the data by the tapers and derive the fourier transform and the
+We multiply the data by the tapers and derive the Fourier transform and the
magnitude of the squared spectra (the power) for each tapered time-series:
"""
@@ -118,7 +118,7 @@ the spectrum (the other half is equal):
"""
-L = n_samples / 2 + 1
+L = n_samples // 2 + 1
sides = 'onesided'
"""
@@ -129,7 +129,7 @@ We estimate adaptive weighting of the tapers, based on the data (see
"""
w = np.empty((nseq, K, L))
-for i in xrange(nseq):
+for i in range(nseq):
w[i], _ = utils.adaptive_weights(tspectra[i], eigs, sides=sides)
@@ -151,8 +151,8 @@ Looping over the ROIs:
"""
-for i in xrange(nseq):
- for j in xrange(i):
+for i in range(nseq):
+ for j in range(i, nseq):
"""
@@ -253,18 +253,16 @@ coh = np.mean(coh_mat[:, :, freq_idx], -1) # Averaging on the last dimension
"""
-The next line calls the visualization routine which displays the data
+The next line calls the visualization routine which displays the data:
"""
-
fig01 = drawmatrix_channels(coh,
roi_names,
size=[10., 10.],
color_anchor=0,
title='MTM Coherence')
-
"""
.. image:: fig/multi_taper_coh_01.png
@@ -275,16 +273,14 @@ We start by initializing a TimeSeries object with this data and with the
sampling_interval provided above. We set the metadata 'roi' field with the ROI
names.
-
"""
T = TimeSeries(pdata, sampling_interval=TR)
T.metadata['roi'] = roi_names
-
"""
-We initialize an MTCoherenceAnalyzer object with the TimeSeries object
+We initialize an MTCoherenceAnalyzer object with the TimeSeries object:
"""
@@ -298,8 +294,8 @@ The relevant indices in the Analyzer object are derived:
freq_idx = np.where((C2.frequencies > 0.02) * (C2.frequencies < 0.15))[0]
-
"""
+
The call to C2.coherence triggers the computation and this is averaged over the
frequency range of interest in the same line and then displayed:
@@ -312,15 +308,13 @@ fig02 = drawmatrix_channels(coh,
color_anchor=0,
title='MTCoherenceAnalyzer')
-
"""
.. image:: fig/multi_taper_coh_02.png
-
For comparison, we also perform the analysis using the standard
CoherenceAnalyzer object, which does the analysis using Welch's windowed
-periodogram, instead of the multi-taper spectral estimation method (see
+periodogram, instead of the multitaper spectral estimation method (see
:ref:`resting_state` for a more thorough analysis of this data using this
method):
@@ -338,15 +332,12 @@ fig03 = drawmatrix_channels(coh,
color_anchor=0,
title='CoherenceAnalyzer')
-
"""
.. image:: fig/multi_taper_coh_03.png
-
plt.show() is called in order to display the figures:
-
"""
plt.show()
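The "multiply by the tapers and take the Fourier transform" step referred to in
the multi_taper_coh.py hunks can be written compactly with broadcasting. A
sketch, assuming ``pdata`` is the (nseq, n_samples) data array and ``tapers``
the (K, n_samples) DPSS array computed above::

    # One tapered copy of every ROI time-series, then per-taper spectra and power
    tdata = tapers[None, :, :] * pdata[:, None, :]   # shape (nseq, K, n_samples)
    tspectra = fftpack.fft(tdata)
    mag_sqr_spectra = np.abs(tspectra) ** 2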
diff --git a/doc/examples/multi_taper_spectral_estimation.py b/doc/examples/multi_taper_spectral_estimation.py
index 0935381..cd54f7c 100644
--- a/doc/examples/multi_taper_spectral_estimation.py
+++ b/doc/examples/multi_taper_spectral_estimation.py
@@ -2,9 +2,9 @@
.. _multi-taper-psd:
-===============================
-Multi-taper spectral estimation
-===============================
+==============================
+Multitaper spectral estimation
+==============================
The distribution of power in a signal, as a function of frequency, known as the
power spectrum (or PSD, for power spectral density) can be estimated using
@@ -32,7 +32,7 @@ stationarity of the signal. In this method, a sliding window is applied to
different parts of the signal and the windowed spectrum is averaged from these
different samples. This is sometimes referred to as Welch's periodogram
[Welch1967]_ and it is the default method used in
-:func:`algorithms.get_spectra` (with the hanning window as the window function
+:func:`algorithms.get_spectra` (with the Hanning window as the window function
used and no overlap between the windows). However, it may lead to the
following problem:
@@ -50,9 +50,7 @@ following problem:
in the frequency domain with the spectrum of the boxcar window. The spectral
leakage induced by this operation is demonstrated in the following example.
-
-We start by importing the modules/functions we will need in this example
-
+We start by importing the modules/functions we will need in this example:
"""
@@ -67,7 +65,9 @@ from nitime.viz import winspect
from nitime.viz import plot_spectral_estimate
"""
+
For demonstration, we will use a window of 128 points:
+
"""
npts = 128
@@ -95,6 +95,7 @@ windows. Other windows have been designed in order to optimize the amount of
spectral leakage and limit it to certain parts of the spectrum. The following
example demonstrates the spectral leakage for several different windows
(including the boxcar):
+
"""
fig02 = plt.figure()
@@ -214,12 +215,10 @@ fig03 = plot_spectral_estimate(freqs, psd, (d_psd,), elabels=("Periodogram",))
.. image:: fig/multi_taper_spectral_estimation_03.png
-
Next, we use Welch's periodogram, by applying :func:`tsa.get_spectra`. Note
-that we explicitely provide the function with a 'method' dict, which specifies
+that we explicitly provide the function with a 'method' dict, which specifies
the method used in order to calculate the PSD, but the default method is 'welch'.
-
"""
welch_freqs, welch_psd = tsa.get_spectra(ar_seq,
@@ -235,8 +234,7 @@ fig04 = plot_spectral_estimate(freqs, psd, (welch_psd,), elabels=("Welch",))
.. image:: fig/multi_taper_spectral_estimation_04.png
-
-Next, we use the multi-taper estimation method. We estimate the spectrum:
+Next, we use the multitaper estimation method. We estimate the spectrum:
"""
@@ -260,7 +258,6 @@ Kmax = nu[0] / 2
We calculate a Chi-squared model 95% confidence interval 2*Kmax degrees of
freedom (see [Percival1993]_ eq 258)
-
"""
p975 = dist.chi2.ppf(.975, 2 * Kmax)
@@ -272,13 +269,12 @@ l2 = ln2db * np.log(2 * Kmax / p025)
hyp_limits = (psd_mt + l1, psd_mt + l2)
fig05 = plot_spectral_estimate(freqs, psd, (psd_mt,), hyp_limits,
- elabels=(r"MT with $\chi^{2}$ 95% interval",))
+ elabels=(r"MT with $\chi^{2}$ 95% interval",))
"""
.. image:: fig/multi_taper_spectral_estimation_05.png
-
An iterative method ([Thomson2007]_) can be used in order to adaptively set the
weighting of the different tapers, according to the actual spectral
concentration in the given signal (and not only the theoretical spectral
@@ -302,7 +298,6 @@ hyp_limits = (adaptive_psd_mt + l1, adaptive_psd_mt + l2)
fig06 = plot_spectral_estimate(freqs, psd, (adaptive_psd_mt,), hyp_limits,
elabels=('MT with adaptive weighting and 95% interval',))
-
"""
.. image:: fig/multi_taper_spectral_estimation_06.png
@@ -328,7 +323,7 @@ measurement (one tapered spectrum) out.
| **pseudovalues**
| :math:`\hat{\theta}_i = n\hat{\theta} - (n-1)\hat{\theta}_{-i}`
-The jackknifed esimator is computed as:
+The jackknifed estimator is computed as:
:math:`\tilde{\theta} = \dfrac{1}{n}\sum_i \hat{\theta}_i = n\hat{\theta} - \dfrac{n-1}{n}\sum_i \hat{\theta}_{-i}`
@@ -351,7 +346,6 @@ fig07 = plot_spectral_estimate(freqs, psd, (psd_mt,),
jk_limits,
elabels=('MT with JK 95% interval',))
-
"""
.. image:: fig/multi_taper_spectral_estimation_07.png
@@ -364,7 +358,6 @@ jack-knifing procedure.
"""
-
_, _, adaptive_jk_var = tsa.multi_taper_psd(
ar_seq, adaptive=True, jackknife=True
)
@@ -375,9 +368,8 @@ jk_p = (dist.t.ppf(.975, Kmax - 1) * np.sqrt(adaptive_jk_var)) * ln2db
adaptive_jk_limits = (adaptive_psd_mt - jk_p, adaptive_psd_mt + jk_p)
fig08 = plot_spectral_estimate(freqs, psd, (adaptive_psd_mt,),
- adaptive_jk_limits,
- elabels=('adaptive-MT with JK 95% interval',))
-
+ adaptive_jk_limits,
+ elabels=('adaptive-MT with JK 95% interval',))
"""
diff --git a/doc/examples/resting_state_fmri.py b/doc/examples/resting_state_fmri.py
index acf29ea..36c4e95 100644
--- a/doc/examples/resting_state_fmri.py
+++ b/doc/examples/resting_state_fmri.py
@@ -31,7 +31,6 @@ import os
#Import from other libraries:
import numpy as np
import matplotlib.pyplot as plt
-from matplotlib.mlab import csv2rec
import nitime
#Import the time-series objects:
@@ -49,15 +48,17 @@ f_ub = 0.15
"""
-We use csv2rec to read the data in from file to a recarray:
+We use NumPy to read the data from the csv file into a recarray:
"""
data_path = os.path.join(nitime.__path__[0], 'data')
-data_rec = csv2rec(os.path.join(data_path, 'fmri_timeseries.csv'))
+data_rec = np.genfromtxt(os.path.join(data_path, 'fmri_timeseries.csv'),
+ names=True, delimiter=',')
"""
+
This data structure contains in its dtype a field 'names', which contains the
first row in each column. In this case, that is the labels of the ROIs from
which the data in each column was extracted. The data from the recarray is
@@ -80,7 +81,6 @@ for n_idx, roi in enumerate(roi_names):
#Normalize the data:
data = percent_change(data)
-
"""
We initialize a TimeSeries object from the normalized data:
@@ -90,21 +90,21 @@ We initialize a TimeSeries object from the normalized data:
T = TimeSeries(data, sampling_interval=TR)
T.metadata['roi'] = roi_names
-
"""
First, we examine the correlations between the time-series extracted from
different parts of the brain. The following script extracts the data (using the
-draw_matrix function, displaying the correlation matrix with the ROIs labeled.
+drawmatrix_channels function), displaying the correlation matrix with the ROIs
+labeled.
"""
-#Initialize the correlation analyzer
+# Initialize the correlation analyzer
C = CorrelationAnalyzer(T)
-#Display the correlation matrix
-fig01 = drawmatrix_channels(C.corrcoef, roi_names, size=[10., 10.], color_anchor=0)
-
+# Display the correlation matrix
+fig01 = drawmatrix_channels(C.corrcoef, roi_names, size=[10., 10.],
+ color_anchor=0)
"""
@@ -122,7 +122,7 @@ correlation values related to each other? The right caudate and left putamen
seem to have a moderately low correlation value. One way to examine this
question is by looking at the temporal structure of the cross-correlation
functions. In order to do that, from the CorrelationAnalyzer object, we extract
-the normalized cross-correlation function. This results in another TimeSeries`
+the normalized cross-correlation function. This results in another TimeSeries
object, which contains the full time-series of the cross-correlation between
any combination of time-series from the different channels in the time-series
object. We can pass the resulting object, together with a list of indices to
@@ -133,16 +133,15 @@ series:
xc = C.xcorr_norm
-idx_lcau = np.where(roi_names == 'lcau')[0]
-idx_rcau = np.where(roi_names == 'rcau')[0]
-idx_lput = np.where(roi_names == 'lput')[0]
-idx_rput = np.where(roi_names == 'rput')[0]
+idx_lcau = np.where(roi_names == 'LCau')[0]
+idx_rcau = np.where(roi_names == 'RCau')[0]
+idx_lput = np.where(roi_names == 'LPut')[0]
+idx_rput = np.where(roi_names == 'RPut')[0]
fig02 = plot_xcorr(xc,
((idx_lcau, idx_rcau),
(idx_lcau, idx_lput)),
- line_labels=['rcau', 'lput'])
-
+ line_labels=['RCau', 'LPut'])
"""
@@ -189,7 +188,6 @@ computation:
\phi(\lambda) = arg [R_{xy} (\lambda)] = arg [f_{xy} (\lambda)]
-
This value can be used in order to infer which area is leading and which area
is lagging (according to the sign of the relative phase) and, can be used to
compute the temporal delay between activity in one ROI and the other.
@@ -222,6 +220,7 @@ averaged across all these frequency bands.
freq_idx = np.where((C.frequencies > f_lb) * (C.frequencies < f_ub))[0]
"""
+
The C.coherence attribute is an ndarray of dimensions $n_{ROI}$ by $n_{ROI}$ by
$n_{frequencies}$.
@@ -230,8 +229,8 @@ bands of interest and pass that to the visualization function:
"""
-
-coh = np.mean(C.coherence[:, :, freq_idx], -1) # Averaging on the last dimension
+# Averaging on the last dimension:
+coh = np.mean(C.coherence[:, :, freq_idx], -1)
fig03 = drawmatrix_channels(coh, roi_names, size=[10., 10.], color_anchor=0)
"""
@@ -255,7 +254,6 @@ Extract the coherence and average across the same frequency bands as before:
"""
-
coh = np.mean(coh[:, :, freq_idx], -1) # Averaging on the last dimension
"""
@@ -312,18 +310,15 @@ between time-series $x$ and time-series $y$, *given* time series $r$):
"""
-
idx3 = np.hstack(16 * [idx_lcau])
coh = C.coherence_partial[idx1, idx2, idx3].reshape(4, 4, C.frequencies.shape[0])
coh = np.mean(coh[:, :, freq_idx], -1)
"""
-
Again, we visualize the result, using both the :func:`viz.drawgraph_channels`
and the :func:`drawmatrix_channels` functions:
-
"""
fig05 = drawgraph_channels(coh, roi_names[idx])
@@ -333,10 +328,8 @@ fig06 = drawmatrix_channels(coh, roi_names[idx], color_anchor=0)
.. image:: fig/resting_state_fmri_05.png
-
.. image:: fig/resting_state_fmri_06.png
-
As can be seen, the resulting partial coherence between left putamen and right
caudate, given the activity in the left caudate is smaller than the coherence
between these two areas, suggesting that part of this coherence can be
@@ -344,23 +337,22 @@ explained by their common connection to the left caudate.
XXX Add description of calculation of temporal delay here.
-
We call plt.show() in order to display the figures:
"""
plt.show()
-
"""
.. [Sun2005] F.T. Sun and L.M. Miller and M. D'Esposito(2005). Measuring
- temporal dynamics of functional networks using phase spectrum of
- fMRI data. Neuroimage, 28: 227-37.
+ temporal dynamics of functional networks using phase spectrum of
+ fMRI data. Neuroimage, 28: 227-37.
.. [Silver2010] M.A Silver, AN Landau, TZ Lauritzen, W Prinzmetal, LC
Robertson(2010) Isolating human brain functional connectivity associated
with a specific cognitive process, in Human Vision and Electronic Imaging
XV, edited by B.E. Rogowitz and T.N. Pappas, Proceedings of SPIE, Volume
7527, pp. 75270B-1 to 75270B-9
+
"""
diff --git a/doc/examples/snr_example.py b/doc/examples/snr_example.py
index 1a2c2a5..1e06ead 100644
--- a/doc/examples/snr_example.py
+++ b/doc/examples/snr_example.py
@@ -1,6 +1,6 @@
"""
==============================================
-Caclulation of Signal to noise and information
+Calculation of signal-to-noise and information
==============================================
This method is based on ideas described in [Borst1999]_ (Figure 2) and
@@ -21,9 +21,9 @@ channel and is an upper bound for all other cases.
The signal power is estimated as the power of the mean response to repeated
presentations of the same signal and the noise power is calculated as the
-average of the power in the deviation from this average in each trial
+average of the power in the deviation from this average in each trial.
-We import the neccesary modules:
+We import the necessary modules:
"""
@@ -59,8 +59,8 @@ fig_tseries = []
"""
We add different levels of noise to the ar_seq variable, in order to
-demonstrate the effects of adding noise on signal to noise ratio, as well as
-the calculated information
+demonstrate the effects of adding noise on signal-to-noise ratio, as well as
+the calculated information.
"""
@@ -81,7 +81,7 @@ for idx, noise in enumerate([1, 10, 50, 100]):
"""
- for trial in xrange(n_trials):
+ for trial in range(n_trials):
sample[-1][trial] += np.random.randn(sample[-1][trial].shape[0]) * noise
"""
@@ -124,10 +124,9 @@ for idx, noise in enumerate([1, 10, 50, 100]):
A special visualization function :func:`viz.plot_snr` is used in order to
display the signal power (blue) and the noise power (green), both in the
-left sub-plot. In addition, the SNR (blue) and the cumulative information
+left subplot. In addition, the SNR (blue) and the cumulative information
(as a function of frequency bands, starting from low frequencies, in red)
-are dislplayed in the right subplot.
-
+are displayed in the right subplot.
.. image:: fig/snr_example_02.png
@@ -137,8 +136,8 @@ signal.
.. image:: fig/snr_example_03.png
The signal power remains rather similar, but the noise power increases
-(across all bands). As a consequence, the signal to noise ratio decreases and the
-accumulated information decreases
+(across all bands). As a consequence, the signal-to-noise ratio decreases and the
+accumulated information decreases.
.. image:: fig/snr_example_04.png
@@ -158,8 +157,9 @@ overwhelmed with noise:
.. image:: fig/snr_example_08.png
Finally, we use :func:`plot_snr_diff` in order to compare information
-transmission (on the left) and the signal to noise ratio (on the right) between
+transmission (on the left) and the signal-to-noise ratio (on the right) between
the two last noise levels:
+
"""
ts1 = ts.TimeSeries(sample[-1], sampling_rate=1.)
@@ -171,15 +171,13 @@ plt.show()
.. image:: fig/snr_example_09.png
-
References
- .. [Hsu2004] Hsu A, Borst A and Theunissen, FE (2004) Quantifying
- variability in neural responses ans its application for the validation of
- model predictions. Network: Comput Neural Syst 15:91-109
-
- .. [Borst1999] Borst A and Theunissen FE (1999) Information theory and
- neural coding. Nat Neurosci 2:947-957
+.. [Hsu2004] Hsu A, Borst A and Theunissen, FE (2004) Quantifying
+   variability in neural responses and its application for the validation of
+   model predictions. Network: Comput Neural Syst 15:91-109
+.. [Borst1999] Borst A and Theunissen FE (1999) Information theory and
+   neural coding. Nat Neurosci 2:947-957
"""
diff --git a/doc/index.rst b/doc/index.rst
index 20ba3dd..7737306 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -9,11 +9,11 @@ It contains a core of numerical algorithms for time-series analysis both in the
time and spectral domains, a set of container objects to represent time-series,
and auxiliary objects that expose a high level interface to the numerical
machinery and make common analysis tasks easy to express with compact and
-semantically clear code.
+semantically clear code.
.. toctree::
:maxdepth: 1
-
+
documentation
whatsnew/index
examples/index
diff --git a/doc/links_names.txt b/doc/links_names.txt
index 880ee1f..7f5d0e2 100644
--- a/doc/links_names.txt
+++ b/doc/links_names.txt
@@ -2,7 +2,7 @@
and name substitutions. It may be included in many files,
therefore it should only contain link targets and name
substitutions. Try grepping for "^\.\. _" to find plausible
- candidates for this list.
+ candidates for this list.
.. NOTE: reST targets are
__not_case_sensitive__, so only one target definition is needed for
@@ -13,8 +13,8 @@
.. _`Brain Imaging Center`: http://bic.berkeley.edu/
.. _`Nibabel`: http://nipy.org/nibabel
-.. _gh-download: http://github.com/nipy/nitime/downloads
-.. _gh-archive: http://github.com/nipy/nitime/archives/master
+.. _gh-download: https://github.com/nipy/nitime/releases
+.. _gh-archive: https://github.com/nipy/nitime/archive/master.zip
.. _nitime-pypi: http://pypi.python.org/pypi/nitime
.. Documentation tools
@@ -59,6 +59,9 @@
.. _nose: http://somethingaboutorange.com/mrl/projects/nose
.. _`python coverage tester`: http://nedbatchelder.com/code/modules/coverage.html
.. _easy-install: http://packages.python.org/distribute/easy_install.html
+.. _pip: https://pip.pypa.io/en/stable/
+.. _anaconda: https://anaconda.org/
+.. _conda: http://conda.pydata.org/
.. Other python projects
.. _numpy: http://www.scipy.org/NumPy
@@ -72,6 +75,7 @@
.. _mayavi: http://mayavi.sourceforge.net/
.. _sympy: http://code.google.com/p/sympy/
.. _networkx: http://networkx.lanl.gov/
+.. _cython: http://cython.org
.. Python imaging projects
.. _PyMVPA: http://www.pymvpa.org
diff --git a/doc/news.rst b/doc/news.rst
index 6781322..680a910 100644
--- a/doc/news.rst
+++ b/doc/news.rst
@@ -2,7 +2,9 @@
Nitime news
=============
-June XX: version 0.5 released.
+February 7th, 2016: version 0.6 released.
+
+June 13, 2014: version 0.5 released.
June 19 2012: Version 0.4 released.
diff --git a/doc/sphinxext/docscrape.py b/doc/sphinxext/docscrape.py
index 3999cc9..b1e1c1a 100644
--- a/doc/sphinxext/docscrape.py
+++ b/doc/sphinxext/docscrape.py
@@ -6,7 +6,10 @@ import inspect
import textwrap
import re
import pydoc
-from StringIO import StringIO
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
from warnings import warn
class Reader(object):
@@ -112,8 +115,8 @@ class NumpyDocString(object):
def __getitem__(self,key):
return self._parsed_data[key]
- def __setitem__(self,key,val):
- if not self._parsed_data.has_key(key):
+    def __setitem__(self, key, val):
+        if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
@@ -183,7 +186,7 @@ class NumpyDocString(object):
return params
-
+
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
@@ -216,7 +219,7 @@ class NumpyDocString(object):
current_func = None
rest = []
-
+
for line in content:
if not line.strip(): continue
@@ -258,7 +261,7 @@ class NumpyDocString(object):
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
-
+
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
@@ -275,7 +278,7 @@ class NumpyDocString(object):
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
-
+
def _parse(self):
self._doc.reset()
self._parse_summary()
@@ -413,23 +416,23 @@ class FunctionDoc(NumpyDocString):
doc = inspect.getdoc(func) or ''
try:
NumpyDocString.__init__(self, doc)
- except ValueError, e:
- print '*'*78
- print "ERROR: '%s' while parsing `%s`" % (e, self._f)
- print '*'*78
- #print "Docstring follows:"
- #print doclines
- #print '='*78
+ except ValueError as e:
+ print('*'*78)
+ print("ERROR: '%s' while parsing `%s`" % (e, self._f))
+ print('*'*78)
+ #print("Docstring follows:")
+ #print(doclines)
+ #print('='*78)
if not self['Signature']:
func, func_name = self.get_func()
try:
# try to read signature
- argspec = inspect.getargspec(func)
- argspec = inspect.formatargspec(*argspec)
+ argspec = inspect.signature(func)
+ argspec = str(argspec)
argspec = argspec.replace('*','\*')
signature = '%s%s' % (func_name, argspec)
- except TypeError, e:
+ except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
@@ -440,7 +443,7 @@ class FunctionDoc(NumpyDocString):
else:
func = self._f
return func, func_name
-
+
def __str__(self):
out = ''
@@ -451,8 +454,8 @@ class FunctionDoc(NumpyDocString):
'meth': 'method'}
if self._role:
- if not roles.has_key(self._role):
- print "Warning: invalid role %s" % self._role
+        if self._role not in roles:
+ print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
func_name)
@@ -488,10 +491,8 @@ class ClassDoc(NumpyDocString):
out += "\n\n"
#for m in self.methods:
- # print "Parsing `%s`" % m
+ # print("Parsing `%s`" % m)
# out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
# out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
return out
-
-
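The ``inspect.getargspec``/``formatargspec`` to ``inspect.signature`` change in
docscrape.py follows the usual Python 3 porting pattern;
``str(inspect.signature(f))`` already includes the surrounding parentheses, so
no separate formatting call is needed. A toy check::

    import inspect

    def f(a, b=1, *args, **kwargs):
        pass

    print(str(inspect.signature(f)))   # prints "(a, b=1, *args, **kwargs)"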
diff --git a/doc/sphinxext/github.py b/doc/sphinxext/github.py
index d6215e6..2793bf9 100644
--- a/doc/sphinxext/github.py
+++ b/doc/sphinxext/github.py
@@ -19,6 +19,7 @@ Authors
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
+from sphinx.util import logging
def make_link_node(rawtext, app, type, slug, options):
"""Create a link to a github resource.
@@ -36,7 +37,7 @@ def make_link_node(rawtext, app, type, slug, options):
raise AttributeError
if not base.endswith('/'):
base += '/'
- except AttributeError, err:
+ except AttributeError as err:
raise ValueError('github_project_url configuration value is not set (%s)' % str(err))
ref = base + type + '/' + slug + '/'
@@ -133,7 +134,7 @@ def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
raise AttributeError
if not base.endswith('/'):
base += '/'
- except AttributeError, err:
+ except AttributeError as err:
raise ValueError('github_project_url configuration value is not set (%s)' % str(err))
ref = base + text
@@ -143,13 +144,16 @@ def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
def setup(app):
"""Install the plugin.
-
+
:param app: Sphinx application context.
"""
- app.info('Initializing GitHub plugin')
+ LOG = logging.getLogger(__name__)
+ LOG.info('Initializing GitHub plugin')
app.add_role('ghissue', ghissue_role)
app.add_role('ghpull', ghissue_role)
app.add_role('ghuser', ghuser_role)
app.add_role('ghcommit', ghcommit_role)
app.add_config_value('github_project_url', None, 'env')
- return
+
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
diff --git a/doc/sphinxext/numpydoc.py b/doc/sphinxext/numpydoc.py
index ff6c44c..864b54a 100644
--- a/doc/sphinxext/numpydoc.py
+++ b/doc/sphinxext/numpydoc.py
@@ -49,7 +49,7 @@ def mangle_docstrings(app, what, name, obj, options, lines,
try:
references.append(int(l[len('.. ['):l.index(']')]))
except ValueError:
- print "WARNING: invalid reference in %s docstring" % name
+ print("WARNING: invalid reference in %s docstring" % name)
# Start renaming from the biggest number, otherwise we may
# overwrite references.
@@ -88,7 +88,7 @@ def initialize(app):
def setup(app, get_doc_object_=get_doc_object):
global get_doc_object
get_doc_object = get_doc_object_
-
+
app.connect('autodoc-process-docstring', mangle_docstrings)
app.connect('builder-inited', initialize)
app.add_config_value('numpydoc_edit_link', None, True)
@@ -104,7 +104,7 @@ def monkeypatch_sphinx_ext_autodoc():
if sphinx.ext.autodoc.format_signature is our_format_signature:
return
- print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
+ print("[numpydoc] Monkeypatching sphinx.ext.autodoc ...")
_original_format_signature = sphinx.ext.autodoc.format_signature
sphinx.ext.autodoc.format_signature = our_format_signature
diff --git a/doc/sphinxext/only_directives.py b/doc/sphinxext/only_directives.py
index ffb4d84..22dc8b2 100644
--- a/doc/sphinxext/only_directives.py
+++ b/doc/sphinxext/only_directives.py
@@ -37,8 +37,8 @@ def builder_inited(app):
html_only.traverse = only_base.dont_traverse
def setup(app):
- app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0))
- app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0))
+ app.add_directive('htmlonly', html_only_directive, True)
+ app.add_directive('latexonly', latex_only_directive, True)
app.add_node(html_only)
app.add_node(latex_only)
diff --git a/doc/sphinxext/plot_directive.py b/doc/sphinxext/plot_directive.py
deleted file mode 100644
index 3f0963b..0000000
--- a/doc/sphinxext/plot_directive.py
+++ /dev/null
@@ -1,489 +0,0 @@
-"""A special directive for including a matplotlib plot.
-
-The source code for the plot may be included in one of two ways:
-
- 1. A path to a source file as the argument to the directive::
-
- .. plot:: path/to/plot.py
-
- When a path to a source file is given, the content of the
- directive may optionally contain a caption for the plot::
-
- .. plot:: path/to/plot.py
-
- This is the caption for the plot
-
- Additionally, one my specify the name of a function to call (with
- no arguments) immediately after importing the module::
-
- .. plot:: path/to/plot.py plot_function1
-
- 2. Included as inline content to the directive::
-
- .. plot::
-
- import matplotlib.pyplot as plt
- import matplotlib.image as mpimg
- import numpy as np
- img = mpimg.imread('_static/stinkbug.png')
- imgplot = plt.imshow(img)
-
-In HTML output, `plot` will include a .png file with a link to a high-res
-.png and .pdf. In LaTeX output, it will include a .pdf.
-
-To customize the size of the plot, this directive supports all of the
-options of the `image` directive, except for `target` (since plot will
-add its own target). These include `alt`, `height`, `width`, `scale`,
-`align` and `class`.
-
-Additionally, if the `:include-source:` option is provided, the
-literal source will be displayed inline in the text, (as well as a
-link to the source in HTML). If this source file is in a non-UTF8 or
-non-ASCII encoding, the encoding must be specified using the
-`:encoding:` option.
-
-The set of file formats to generate can be specified with the
-`plot_formats` configuration variable.
-"""
-
-import sys, os, shutil, imp, warnings, cStringIO, re
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
-from docutils.parsers.rst import directives
-try:
- # docutils 0.4
- from docutils.parsers.rst.directives.images import align
-except ImportError:
- # docutils 0.5
- from docutils.parsers.rst.directives.images import Image
- align = Image.align
-import sphinx
-
-sphinx_version = sphinx.__version__.split(".")
-# The split is necessary for sphinx beta versions where the string is
-# '6b1'
-sphinx_version = tuple([int(re.split('[a-z]', x)[0])
- for x in sphinx_version[:2]])
-
-import matplotlib
-import matplotlib.cbook as cbook
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.image as image
-from matplotlib import _pylab_helpers
-from matplotlib.sphinxext import only_directives
-
-
-class PlotWarning(Warning):
- """Warning category for all warnings generated by this directive.
-
- By printing our warnings with this category, it becomes possible to turn
- them into errors by using in your conf.py::
-
- warnings.simplefilter('error', plot_directive.PlotWarning)
-
- This way, you can ensure that your docs only build if all your examples
- actually run successfully.
- """
- pass
-
-
-# os.path.relpath is new in Python 2.6
-if hasattr(os.path, 'relpath'):
- relpath = os.path.relpath
-else:
- # This code is snagged from Python 2.6
-
- def relpath(target, base=os.curdir):
- """
- Return a relative path to the target from either the current dir or an optional base dir.
- Base can be a directory specified either as absolute or relative to current dir.
- """
-
- if not os.path.exists(target):
- raise OSError, 'Target does not exist: '+target
-
- if not os.path.isdir(base):
- raise OSError, 'Base is not a directory or does not exist: '+base
-
- base_list = (os.path.abspath(base)).split(os.sep)
- target_list = (os.path.abspath(target)).split(os.sep)
-
- # On the windows platform the target may be on a completely
- # different drive from the base.
- if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
- raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
-
- # Starting from the filepath root, work out how much of the
- # filepath is shared by base and target.
- for i in range(min(len(base_list), len(target_list))):
- if base_list[i] <> target_list[i]: break
- else:
- # If we broke out of the loop, i is pointing to the first
- # differing path elements. If we didn't break out of the
- # loop, i is pointing to identical path elements.
- # Increment i so that in all cases it points to the first
- # differing path elements.
- i+=1
-
- rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
- if rel_list:
- return os.path.join(*rel_list)
- else:
- return ""
-
-template = """
-.. htmlonly::
-
- %(links)s
-
- .. figure:: %(prefix)s%(tmpdir)s/%(outname)s.png
-%(options)s
-
-%(caption)s
-
-.. latexonly::
- .. figure:: %(prefix)s%(tmpdir)s/%(outname)s.pdf
-%(options)s
-
-%(caption)s
-
-"""
-
-exception_template = """
-.. htmlonly::
-
- [`source code <%(linkdir)s/%(basename)s.py>`__]
-
-Exception occurred rendering plot.
-
-"""
-
-template_content_indent = ' '
-
-def out_of_date(original, derived):
- """
- Returns True if derivative is out-of-date wrt original,
- both of which are full file paths.
- """
- return (not os.path.exists(derived) or
- (os.path.exists(original) and
- os.stat(derived).st_mtime < os.stat(original).st_mtime))
-
-def run_code(plot_path, function_name, plot_code):
- """
- Import a Python module from a path, and run the function given by
- name, if function_name is not None.
- """
- # Change the working directory to the directory of the example, so
- # it can get at its data files, if any. Add its path to sys.path
- # so it can import any helper modules sitting beside it.
- if plot_code is not None:
- exec(plot_code)
- else:
- pwd = os.getcwd()
- path, fname = os.path.split(plot_path)
- sys.path.insert(0, os.path.abspath(path))
- stdout = sys.stdout
- sys.stdout = cStringIO.StringIO()
- os.chdir(path)
- fd = None
- try:
- fd = open(fname)
- module = imp.load_module(
- "__plot__", fd, fname, ('py', 'r', imp.PY_SOURCE))
- finally:
- del sys.path[0]
- os.chdir(pwd)
- sys.stdout = stdout
- if fd is not None:
- fd.close()
-
- if function_name is not None:
- getattr(module, function_name)()
-
-def run_savefig(plot_path, basename, tmpdir, destdir, formats):
- """
- Once a plot script has been imported, this function runs savefig
- on all of the figures in all of the desired formats.
- """
- fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
- for i, figman in enumerate(fig_managers):
- for j, (format, dpi) in enumerate(formats):
- if len(fig_managers) == 1:
- outname = basename
- else:
- outname = "%s_%02d" % (basename, i)
- outname = outname + "." + format
- outpath = os.path.join(tmpdir, outname)
- try:
- figman.canvas.figure.savefig(outpath, dpi=dpi)
- except:
- s = cbook.exception_to_str("Exception saving plot %s" % plot_path)
- warnings.warn(s, PlotWarning)
- return 0
- if j > 0:
- shutil.copyfile(outpath, os.path.join(destdir, outname))
-
- return len(fig_managers)
-
-def clear_state():
- plt.close('all')
- matplotlib.rcdefaults()
- # Set a default figure size that doesn't overflow typical browser
- # windows. The script is free to override it if necessary.
- matplotlib.rcParams['figure.figsize'] = (5.5, 4.5)
-
-def render_figures(plot_path, function_name, plot_code, tmpdir, destdir,
- formats):
- """
- Run a pyplot script and save the low and high res PNGs and a PDF
- in outdir.
- """
- plot_path = str(plot_path) # todo, why is unicode breaking this
- basedir, fname = os.path.split(plot_path)
- basename, ext = os.path.splitext(fname)
-
- all_exists = True
-
- # Look for single-figure output files first
- for format, dpi in formats:
- outname = os.path.join(tmpdir, '%s.%s' % (basename, format))
- if out_of_date(plot_path, outname):
- all_exists = False
- break
-
- if all_exists:
- return 1
-
- # Then look for multi-figure output files, assuming
- # if we have some we have all...
- i = 0
- while True:
- all_exists = True
- for format, dpi in formats:
- outname = os.path.join(
- tmpdir, '%s_%02d.%s' % (basename, i, format))
- if out_of_date(plot_path, outname):
- all_exists = False
- break
- if all_exists:
- i += 1
- else:
- break
-
- if i != 0:
- return i
-
- # We didn't find the files, so build them
-
- clear_state()
- try:
- run_code(plot_path, function_name, plot_code)
- except:
- s = cbook.exception_to_str("Exception running plot %s" % plot_path)
- warnings.warn(s, PlotWarning)
- return 0
-
- num_figs = run_savefig(plot_path, basename, tmpdir, destdir, formats)
-
- if '__plot__' in sys.modules:
- del sys.modules['__plot__']
-
- return num_figs
-
-def _plot_directive(plot_path, basedir, function_name, plot_code, caption,
- options, state_machine):
- formats = setup.config.plot_formats
- if type(formats) == str:
- formats = eval(formats)
-
- fname = os.path.basename(plot_path)
- basename, ext = os.path.splitext(fname)
-
- # Get the directory of the rst file, and determine the relative
- # path from the resulting html file to the plot_directive links
- # (linkdir). This relative path is used for html links *only*,
- # and not the embedded image. That is given an absolute path to
- # the temporary directory, and then sphinx moves the file to
- # build/html/_images for us later.
- rstdir, rstfile = os.path.split(state_machine.document.attributes['source'])
- outdir = os.path.join('plot_directive', basedir)
- reldir = relpath(setup.confdir, rstdir)
- linkdir = os.path.join(reldir, outdir)
-
- # tmpdir is where we build all the output files. This way the
- # plots won't have to be redone when generating latex after html.
-
- # Prior to Sphinx 0.6, absolute image paths were treated as
- # relative to the root of the filesystem. 0.6 and after, they are
- # treated as relative to the root of the documentation tree. We
- # need to support both methods here.
- tmpdir = os.path.join('build', outdir)
- tmpdir = os.path.abspath(tmpdir)
- if sphinx_version < (0, 6):
- prefix = ''
- else:
- prefix = '/'
- if not os.path.exists(tmpdir):
- cbook.mkdirs(tmpdir)
-
- # destdir is the directory within the output to store files
- # that we'll be linking to -- not the embedded images.
- destdir = os.path.abspath(os.path.join(setup.app.builder.outdir, outdir))
- if not os.path.exists(destdir):
- cbook.mkdirs(destdir)
-
- # Properly indent the caption
- caption = '\n'.join(template_content_indent + line.strip()
- for line in caption.split('\n'))
-
- # Generate the figures, and return the number of them
- num_figs = render_figures(plot_path, function_name, plot_code, tmpdir,
- destdir, formats)
-
- # Now start generating the lines of output
- lines = []
-
- if plot_code is None:
- shutil.copyfile(plot_path, os.path.join(destdir, fname))
-
- if options.has_key('include-source'):
- if plot_code is None:
- lines.extend(
- ['.. include:: %s' % os.path.join(setup.app.builder.srcdir, plot_path),
- ' :literal:'])
- if options.has_key('encoding'):
- lines.append(' :encoding: %s' % options['encoding'])
- del options['encoding']
- else:
- lines.extend(['::', ''])
- lines.extend([' %s' % row.rstrip()
- for row in plot_code.split('\n')])
- lines.append('')
- del options['include-source']
- else:
- lines = []
-
- if num_figs > 0:
- options = ['%s:%s: %s' % (template_content_indent, key, val)
- for key, val in options.items()]
- options = "\n".join(options)
-
- for i in range(num_figs):
- if num_figs == 1:
- outname = basename
- else:
- outname = "%s_%02d" % (basename, i)
-
- # Copy the linked-to files to the destination within the build tree,
- # and add a link for them
- links = []
- if plot_code is None:
- links.append('`source code <%(linkdir)s/%(basename)s.py>`__')
- for format, dpi in formats[1:]:
- links.append('`%s <%s/%s.%s>`__' % (format, linkdir, outname, format))
- if len(links):
- links = '[%s]' % (', '.join(links) % locals())
- else:
- links = ''
-
- lines.extend((template % locals()).split('\n'))
- else:
- lines.extend((exception_template % locals()).split('\n'))
-
- if len(lines):
- state_machine.insert_input(
- lines, state_machine.input_lines.source(0))
-
- return []
-
-def plot_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- """
- Handle the arguments to the plot directive. The real work happens
- in _plot_directive.
- """
- # The user may provide a filename *or* Python code content, but not both
- if len(arguments):
- plot_path = directives.uri(arguments[0])
- basedir = relpath(os.path.dirname(plot_path), setup.app.builder.srcdir)
-
- # If there is content, it will be passed as a caption.
-
- # Indent to match expansion below. XXX - The number of spaces matches
- # that of the 'options' expansion further down. This should be moved
- # to common code to prevent them from diverging accidentally.
- caption = '\n'.join(content)
-
- # If the optional function name is provided, use it
- if len(arguments) == 2:
- function_name = arguments[1]
- else:
- function_name = None
-
- return _plot_directive(plot_path, basedir, function_name, None, caption,
- options, state_machine)
- else:
- plot_code = '\n'.join(content)
-
- # Since we don't have a filename, use a hash based on the content
- plot_path = md5(plot_code).hexdigest()[-10:]
-
- return _plot_directive(plot_path, 'inline', None, plot_code, '', options,
- state_machine)
-
-def mark_plot_labels(app, document):
- """
- To make plots referenceable, we need to move the reference from
- the "htmlonly" (or "latexonly") node to the actual figure node
- itself.
- """
- for name, explicit in document.nametypes.iteritems():
- if not explicit:
- continue
- labelid = document.nameids[name]
- if labelid is None:
- continue
- node = document.ids[labelid]
- if node.tagname in ('html_only', 'latex_only'):
- for n in node:
- if n.tagname == 'figure':
- sectname = name
- for c in n:
- if c.tagname == 'caption':
- sectname = c.astext()
- break
-
- node['ids'].remove(labelid)
- node['names'].remove(name)
- n['ids'].append(labelid)
- n['names'].append(name)
- document.settings.env.labels[name] = \
- document.settings.env.docname, labelid, sectname
- break
-
-def setup(app):
- setup.app = app
- setup.config = app.config
- setup.confdir = app.confdir
-
- options = {'alt': directives.unchanged,
- 'height': directives.length_or_unitless,
- 'width': directives.length_or_percentage_or_unitless,
- 'scale': directives.nonnegative_int,
- 'align': align,
- 'class': directives.class_option,
- 'include-source': directives.flag,
- 'encoding': directives.encoding }
-
- app.add_directive('plot', plot_directive, True, (0, 2, 0), **options)
- app.add_config_value(
- 'plot_formats',
- [('png', 80), ('hires.png', 200), ('pdf', 50)],
- True)
-
- app.connect('doctree-read', mark_plot_labels)
diff --git a/doc/upload-gh-pages.sh b/doc/upload-gh-pages.sh
new file mode 100755
index 0000000..9d028b4
--- /dev/null
+++ b/doc/upload-gh-pages.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Upload website to gh-pages
+USAGE="$0 <html_dir> <project-name> [<organization-name>]"
+HTML_DIR=$1
+if [ -z "$HTML_DIR" ]; then
+ echo $USAGE
+ exit 1
+fi
+if [ ! -e "$HTML_DIR/index.html" ]; then
+ echo "$HTML_DIR does not contain an index.html"
+ exit 1
+fi
+if [ -d "$HTML_DIR/.git" ]; then
+ echo "$HTML_DIR already contains a .git directory"
+ exit 1
+fi
+PROJECT=$2
+if [ -z "$PROJECT" ]; then
+ echo $USAGE
+ exit 1
+fi
+ORGANIZATION=$3
+if [ -z "$ORGANIZATION" ]; then
+ ORGANIZATION=nipy
+fi
+upstream_repo="https://github.com/$ORGANIZATION/$PROJECT"
+cd $HTML_DIR
+git init
+git checkout -b gh-pages
+git add *
+# A nojekyll file is needed to tell github that this is *not* a jekyll site:
+touch .nojekyll
+git add .nojekyll
+git commit -a -m "Documentation build - no history"
+git remote add origin $upstream_repo
+git push origin gh-pages --force
+rm -rf .git  # Remove the throwaway git repo; only the pushed gh-pages branch matters
diff --git a/doc/users/install.rst b/doc/users/install.rst
index 34b665a..f7832a0 100644
--- a/doc/users/install.rst
+++ b/doc/users/install.rst
@@ -14,16 +14,10 @@ Dependencies
Must Have
~~~~~~~~~
-Python_ 2.5 or later
-
-NumPy_ 1.3 or later
-
-SciPy_ 0.7 or later
- Numpy and Scipy are high-level, optimized scientific computing libraries.
-
-Matplotlib_
- Python plotting library. In particular, :mod:`Nitime` makes use of the
- :mod:`matplotlib.mlab` module for some implementation of numerical algorithms
+Nitime supports Python_ 2.7 and 3.3/3.4/3.5, and also requires reasonably
+recent versions of NumPy_ and SciPy_, as well as Matplotlib_ (in particular,
+:mod:`Nitime` makes use of the :mod:`matplotlib.mlab` module for some
+implementations of numerical algorithms).
Recommended/optional
~~~~~~~~~~~~~~~~~~~~
@@ -42,35 +36,39 @@ Nibabel_
Getting the latest release
--------------------------
-If you have easy_install_ available on your system, nitime can be downloaded and
-install by issuing::
-
- easy_install nitime
+Using the standard `pip` installation mechanism, you can install nitime by
+issuing the following command in your terminal::
-.. _easy_install: easy-install_
+ pip install nitime
-Otherwise, you can grab the latest release of the source-code at this page_
+The source code of the latest release is also available to download at the
+cheeseshop_, or on our GitHub repo release page_.
.. _page: gh-download_
-Or, at the cheeseshop_
-
.. _cheeseshop: nitime-pypi_
If you want to download the source-code as it is being developed (pre-release),
follow the instructions here: :ref:`following-latest`
Or, if you just want to look at the current development, without using our
-source version-control system, go here_
+source version-control system, you can download it directly here_
.. _here: gh-archive_
+You can also install nitime using conda_, by issuing the following commands::
+
+ conda config --add channels conda-forge
+ conda install nitime
+
+
Building from source
--------------------
The installation process is similar to other Python packages so it
-will be familiar if you have Python experience.
+will be familiar if you have Python experience. In addition to the previously
+mentioned dependencies, you will need to have cython_ installed.
Unpack the tarball and change into the source directory. Once in the
source directory, you can build nitime using::
diff --git a/doc/users/overview.rst b/doc/users/overview.rst
index 8105ce0..dfd50bc 100644
--- a/doc/users/overview.rst
+++ b/doc/users/overview.rst
@@ -101,7 +101,7 @@ representation of financial time-series, which can be compared against each
other, using the common reference and for which the concept of the work-week
applies.
-However, because most often the absolute calender time of the occurence of
+However, because most often the absolute calendar time of the occurrence of
events in an experiment is of no importance, we can disregard it. Rather, the
comparison of the time progression of data in different experiments conducted
in different calendar times (different days, different times in the same day)
@@ -142,7 +142,7 @@ For the time being, there are two types of Time classes: :ref:`TimeArray` and :r
:class:`TimeArray`
-------------------
-This class has less restrictions on it: it is made of an 1-d array, which contains time-points that are not neccesarily ordered. It can also contain several copies of the same time-point. This class can be used in order to represent sparsely occuring events, measured at some unspecified sampling rate and possibly collected from several different channels, where the data is sampled in order of channel and not in order of time. As in the case of the :class:`np.ndarray`. This representation of time carries, in addition to the array itself an attribute :attr:`time_unit`, which is the unit in which we would like to present the time-points (recall that the underlying representation is always in the base-unit).
+This class has fewer restrictions on it: it is made of a 1-d array, which contains time-points that are not necessarily ordered. It can also contain several copies of the same time-point. This class can be used to represent sparsely occurring events, measured at some unspecified sampling rate and possibly collected from several different channels, where the data is sampled in order of channel and not in order of time, as in the case of the :class:`np.ndarray`. This representation of time carries, in addition to the array itself, an attribute :attr:`time_unit`, which is the unit in which we would like to present the time-points (recall that the underlying representation is always in the base-unit).
.. _UniformTime:
@@ -229,10 +229,10 @@ contains several attributes:
---------------
This is an object which represents a collection of events. For example, this
-can represent discrete button presses occuring during an experiment. This
+can represent discrete button presses occurring during an experiment. This
object contains a :ref:`TimeArray` as its representation of time. This means
that the events recorded in the :attr:`data` array can be organized
-according to any organizing principle you would want, not neccesarily according
+according to any organizing principle you would want, not necessarily according
to their organization or order in time. For example, if events are read from
different devices, the order of the events in the data array can be arbitrarily
chosen to be the order of the devices from which data is read.
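A minimal sketch of the :class:`TimeArray` behaviour described above (the
constructor arguments are assumed from the prose, not quoted from the API
reference)::

    import nitime.timeseries as ts

    # Time-points need not be ordered and may repeat; time_unit only controls
    # how the underlying base-unit representation is displayed.
    t = ts.TimeArray([0.5, 0.1, 0.1, 2.0], time_unit='s')
    print(t.time_unit)   # 's'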
diff --git a/doc/whatsnew/index.rst b/doc/whatsnew/index.rst
index 781e95e..30274fe 100644
--- a/doc/whatsnew/index.rst
+++ b/doc/whatsnew/index.rst
@@ -13,11 +13,13 @@ What's new in Nitime
=====================
This section documents the changes that have been made in various versions of
-Nitime:
+Nitime:
.. toctree::
:maxdepth: 1
+ version0.7
+ version0.6
version0.5
version0.4
version0.3
diff --git a/doc/whatsnew/version0.6.rst b/doc/whatsnew/version0.6.rst
new file mode 100644
index 0000000..80911a6
--- /dev/null
+++ b/doc/whatsnew/version0.6.rst
@@ -0,0 +1,57 @@
+======================================
+ Release notes for nitime version 0.6
+======================================
+
+Summary of changes
+------------------
+
+This is a maintenance release, to support newer versions of matplotlib and numpy.
+
+
+Contributors to this release
+----------------------------
+
+The following people contributed to this release:
+
+* Ariel Rokem
+* Igor Gnatenko
+* Yaroslav Halchenko
+
+.. Note::
+
+ This list was generated using::
+
+ git log --pretty=format:"* %aN" rel/0.5... | sort | uniq
+
+ Please let us know if you should appear on this list and do not, so that we
+ can add your name in future release notes.
+
+
+Detailed stats from the github repository
+-----------------------------------------
+
+GitHub stats for the last 604 days.
+We closed a total of 15 issues, 6 pull requests and 9 regular
+issues; this is the full list (generated with the script
+`tools/github_stats.py`):
+
+Pull Requests (6):
+
+* :ghissue:`139`: Testing fixes
+* :ghissue:`138`: TST: Update Travis config, add appveyor config.
+* :ghissue:`134`: test_viz: import networkx instead of nx
+* :ghissue:`135`: BF: Newer numpy can't deal with the type coercion that comes with inplace division
+* :ghissue:`130`: RF: Modernize this call to Matplotlib to address DeprecationWarning
+* :ghissue:`128`: BF(PY2.6): use sys.version_info as unnamed tuple
+
+Issues (9):
+
+* :ghissue:`139`: Testing fixes
+* :ghissue:`138`: TST: Update Travis config, add appveyor config.
+* :ghissue:`134`: test_viz: import networkx instead of nx
+* :ghissue:`135`: BF: Newer numpy can't deal with the type coercion that comes with inp…
+* :ghissue:`132`: RuntimeWarning: Breaking due to iterative meltdown in nitime.utils.adaptive_weights.
+* :ghissue:`129`: nitime.viz import error, ImportError: 'No module named mpl_toolkits.axes_grid'
+* :ghissue:`130`: RF: Modernize this call to Matplotlib to address DeprecationWarning
+* :ghissue:`131`: Multi-taper coherence estimation with two signals of different length?
+* :ghissue:`128`: BF(PY2.6): use sys.version_info as unnamed tuple
diff --git a/doc/whatsnew/version0.7.rst b/doc/whatsnew/version0.7.rst
new file mode 100644
index 0000000..63823f5
--- /dev/null
+++ b/doc/whatsnew/version0.7.rst
@@ -0,0 +1,67 @@
+======================================
+ Release notes for nitime version 0.7
+======================================
+
+Summary of changes
+------------------
+
+This is a maintenance release.
+
+Testing in this version no longer depends on nose_.
+
+
+Contributors to this release
+----------------------------
+
+The following people contributed to this release:
+
+* Ariel Rokem
+* Eric Larson
+* Paul Ivanov
+* Takeshi Abe
+* Tom Dupré la Tour
+* James Franco
+
+.. Note::
+
+ This list was generated using::
+
+ git log --pretty=format:"* %aN" rel/0.5... | sort | uniq
+
+ Please let us know if you should appear on this list and do not, so that we
+ can add your name in future release notes.
+
+
+Detailed stats from the github repository
+-----------------------------------------
+GitHub stats for the last 310 days.
+We closed a total of 19 issues, 9 pull requests and 10 regular
+issues; this is the full list (generated with the script
+`tools/github_stats.py`):
+
+Pull Requests (9):
+
+* :ghissue:`147`: 'GrangerAnalyzer's causality_xy[i, j] should represent the causality i => j'
+* :ghissue:`148`: 'Remove support for Python 3.3'
+* :ghissue:`146`: 'BF: Addition (and sub, mul, div, etc.) should work between time-series.'
+* :ghissue:`145`: 'BF: Inherit t0 from the source time-series'
+* :ghissue:`144`: 'add built files to gitignore'
+* :ghissue:`143`: 'Update doc build machinery to Python 3'
+* :ghissue:`142`: 'Fixing build dependency on numpy'
+* :ghissue:`140`: 'FIX: Fix interpolation of dpss_windows'
+* :ghissue:`139`: 'Testing fixes'
+
+Issues (10):
+
+* :ghissue:`147`: "GrangerAnalyzer's causality_xy[i, j] should represent the causality i => j"
+* :ghissue:`148`: 'Remove support for Python 3.3'
+* :ghissue:`146`: 'BF: Addition (and sub, mul, div, etc.) should work between time-series.'
+* :ghissue:`145`: 'BF: Inherit t0 from the source time-series'
+* :ghissue:`144`: 'add built files to gitignore'
+* :ghissue:`143`: 'Update doc build machinery to Python 3'
+* :ghissue:`141`: 'pip install problem with numpy'
+* :ghissue:`142`: 'Fixing build dependency on numpy'
+* :ghissue:`140`: 'FIX: Fix interpolation of dpss_windows'
+* :ghissue:`139`: 'Testing fixes'
+
+.. include:: ../links_names.txt
diff --git a/min-requirements.txt b/min-requirements.txt
new file mode 100644
index 0000000..8397e83
--- /dev/null
+++ b/min-requirements.txt
@@ -0,0 +1,8 @@
+# Auto-generated by tools/update_requirements.py
+--only-binary numpy,scipy
+--extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple
+matplotlib==3.5
+numpy==1.22
+scipy==1.8
+networkx==2.7
+nibabel==4.0
diff --git a/nitime/__init__.py b/nitime/__init__.py
index c021e80..429060b 100644
--- a/nitime/__init__.py
+++ b/nitime/__init__.py
@@ -21,13 +21,10 @@ have all of these things at their fingertips.
__docformat__ = 'restructuredtext'
-from .version import __version__
+from ._version import __version__
from . import algorithms
from . import timeseries
from . import analysis
-from . import six
from .timeseries import *
-
-from nitime.testlib import test
diff --git a/nitime/_mpl_units.py b/nitime/_mpl_units.py
index a75dbdc..3809a2c 100644
--- a/nitime/_mpl_units.py
+++ b/nitime/_mpl_units.py
@@ -143,7 +143,7 @@ class ConversionInterface:
def convert(obj, unit, axis):
"""
convert obj using unit for the specified axis. If obj is a sequence,
- return the converted sequence. The ouput must be a sequence of scalars
+ return the converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
diff --git a/nitime/algorithms/__init__.py b/nitime/algorithms/__init__.py
index 57a9b6d..7a0a2cc 100644
--- a/nitime/algorithms/__init__.py
+++ b/nitime/algorithms/__init__.py
@@ -44,13 +44,17 @@ external events.
:func:
+8. Entropy
+
+:func:`entropy`, :func:`conditional_entropy`, :func:`mutual_information`,
+:func:`entropy_cc`, :func:`transfer_entropy`
+
The algorithms in this library are the functional form of the algorithms, which
-accept as inputs numpy array and produce numpy array outputs. Therfore, they
+accept as inputs numpy array and produce numpy array outputs. Therefore, they
can be used on any type of data which can be represented in numpy arrays. See
also :mod:`nitime.analysis` for simplified analysis interfaces, using the
data containers implemented in :mod:`nitime.timeseries`
-
"""
from nitime.algorithms.spectral import *
from nitime.algorithms.cohere import *
@@ -59,3 +63,4 @@ from nitime.algorithms.event_related import *
from nitime.algorithms.autoregressive import *
from nitime.algorithms.filter import *
from nitime.algorithms.correlation import *
+from nitime.algorithms.entropy import *
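Since the docstring above stresses that these algorithms take plain NumPy
arrays as input and return NumPy arrays, here is a small, hedged sketch of
that functional interface (the signal and its length are made up for
illustration)::

    import numpy as np
    import nitime.algorithms as tsa

    x = np.random.randn(1024)      # any 1-d ndarray will do
    f, p = tsa.periodogram(x)      # frequencies and power, both ndarrays
    print(f.shape, p.shape)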
diff --git a/nitime/algorithms/autoregressive.py b/nitime/algorithms/autoregressive.py
index 019a59d..334ff6c 100644
--- a/nitime/algorithms/autoregressive.py
+++ b/nitime/algorithms/autoregressive.py
@@ -148,7 +148,7 @@ def AR_est_LD(x, order, rxx=None):
else:
rxx_m = utils.autocorr(x)[:order + 1]
w = np.zeros((order + 1, ), rxx_m.dtype)
- # intialize the recursion with the R[0]w[1]=r[1] solution (p=1)
+ # initialize the recursion with the R[0]w[1]=r[1] solution (p=1)
b = rxx_m[0].real
w_k = rxx_m[1] / b
w[1] = w_k
diff --git a/nitime/algorithms/cohere.py b/nitime/algorithms/cohere.py
index 300aa2d..af70ec8 100644
--- a/nitime/algorithms/cohere.py
+++ b/nitime/algorithms/cohere.py
@@ -17,7 +17,7 @@ from nitime.lazy import matplotlib_mlab as mlab
from .spectral import get_spectra, get_spectra_bi
import nitime.utils as utils
-# To suppport older versions of numpy that don't have tril_indices:
+# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices
@@ -67,8 +67,8 @@ def coherency(time_series, csd_method=None):
f, fxy = get_spectra(time_series, csd_method)
- #A container for the coherencys, with the size and shape of the expected
- #output:
+    # A container for the coherencies, with the size and shape of the expected
+ # output:
c = np.zeros((time_series.shape[0],
time_series.shape[0],
f.shape[0]), dtype=complex) # Make sure it's complex
@@ -108,7 +108,6 @@ def coherency_spec(fxy, fxx, fyy):
--------
:func:`coherency`
"""
-
return fxy / np.sqrt(fxx * fyy)
@@ -275,7 +274,6 @@ def coherency_regularized(time_series, epsilon, alpha, csd_method=None):
def _coherency_reqularized(fxy, fxx, fyy, epsilon, alpha):
-
r"""
A regularized version of the calculation of coherency, which is more
robust to numerical noise than the standard calculation
@@ -303,9 +301,8 @@ def _coherency_reqularized(fxy, fxx, fyy, epsilon, alpha):
The coherence values
"""
-
return (((alpha * fxy + epsilon)) /
- np.sqrt(((alpha ** 2) * (fxx + epsilon) * (fyy + epsilon))))
+ np.sqrt(((alpha ** 2) * (fxx + epsilon) * (fyy + epsilon))))
def coherence_regularized(time_series, epsilon, alpha, csd_method=None):
@@ -341,9 +338,6 @@ def coherence_regularized(time_series, epsilon, alpha, csd_method=None):
This is a symmetric matrix with the coherencys of the signals. The
coherency of signal i and signal j is in f[i][j].
- Returns
- -------
- frequencies, coherence
Notes
-----
@@ -360,8 +354,8 @@ def coherence_regularized(time_series, epsilon, alpha, csd_method=None):
f, fxy = get_spectra(time_series, csd_method)
- #A container for the coherences, with the size and shape of the expected
- #output:
+ # A container for the coherences, with the size and shape of the expected
+ # output:
c = np.zeros((time_series.shape[0],
time_series.shape[0],
f.shape[0]), complex)
@@ -378,7 +372,6 @@ def coherence_regularized(time_series, epsilon, alpha, csd_method=None):
def _coherence_reqularized(fxy, fxx, fyy, epsilon, alpha):
-
r"""A regularized version of the calculation of coherence, which is more
robust to numerical noise than the standard calculation.
@@ -406,7 +399,7 @@ def _coherence_reqularized(fxy, fxx, fyy, epsilon, alpha):
"""
return (((alpha * np.abs(fxy) + epsilon) ** 2) /
- ((alpha ** 2) * (fxx + epsilon) * (fyy + epsilon)))
+ ((alpha ** 2) * (fxx + epsilon) * (fyy + epsilon)))
def coherency_bavg(time_series, lb=0, ub=None, csd_method=None):
@@ -509,7 +502,6 @@ def _coherency_bavg(fxy, fxx, fyy):
temporal dynamics of functional networks using phase spectrum of fMRI
data. Neuroimage, 28: 227-37.
"""
-
# Average the phases and the magnitudes separately and then recombine:
p = np.angle(fxy)
@@ -519,7 +511,7 @@ def _coherency_bavg(fxy, fxx, fyy):
m_bavg = np.mean(m)
# Recombine according to z = r(cos(phi)+sin(phi)i):
- return m_bavg * (np.cos(p_bavg) + np.sin(p_bavg) * 1j)
+ return m_bavg * (np.cos(p_bavg) + np.sin(p_bavg) * 1j)
def coherence_bavg(time_series, lb=0, ub=None, csd_method=None):
@@ -546,7 +538,6 @@ def coherence_bavg(time_series, lb=0, ub=None, csd_method=None):
This is an upper-diagonal array, where c[i][j] is the band-averaged
coherency between time_series[i] and time_series[j]
"""
-
if csd_method is None:
csd_method = {'this_method': 'welch'} # The default
@@ -575,7 +566,8 @@ def coherence_bavg(time_series, lb=0, ub=None, csd_method=None):
def _coherence_bavg(fxy, fxx, fyy):
r"""
Compute the band-averaged coherency between the spectra of two time series.
- input to this function is in the frequency domain
+
+ Input to this function is in the frequency domain
Parameters
----------
@@ -649,7 +641,6 @@ def coherence_partial(time_series, r, csd_method=None):
functional connectivity using coherence and partial coherence analyses of
fMRI data Neuroimage, 21: 647-58.
"""
-
if csd_method is None:
csd_method = {'this_method': 'welch'} # The default
@@ -664,8 +655,12 @@ def coherence_partial(time_series, r, csd_method=None):
for j in range(i, time_series.shape[0]):
f, fxx, frr, frx = get_spectra_bi(time_series[i], r, csd_method)
f, fyy, frr, fry = get_spectra_bi(time_series[j], r, csd_method)
- c[i, j] = coherence_partial_spec(fxy[i][j], fxy[i][i],
- fxy[j][j], frx, fry, frr)
+ c[i, j] = coherence_partial_spec(fxy[i][j],
+ fxy[i][i],
+ fxy[j][j],
+ frx,
+ fry,
+ frr)
idx = tril_indices(time_series.shape[0], -1)
c[idx[0], idx[1], ...] = c[idx[1], idx[0], ...].conj() # Make it symmetric
@@ -702,7 +697,7 @@ def coherence_partial_spec(fxy, fxx, fyy, fxr, fry, frr):
Rxy = coh(fxy, fxx, fyy)
return (((np.abs(Rxy - Rxr * Rry)) ** 2) /
- ((1 - ((np.abs(Rxr)) ** 2)) * (1 - ((np.abs(Rry)) ** 2))))
+ ((1 - ((np.abs(Rxr)) ** 2)) * (1 - ((np.abs(Rry)) ** 2))))
def coherency_phase_spectrum(time_series, csd_method=None):
@@ -799,8 +794,9 @@ def coherency_phase_delay(time_series, lb=0, ub=None, csd_method=None):
for j in range(i, time_series.shape[0]):
p[i][j] = _coherency_phase_delay(f[lb_idx:ub_idx],
fxy[i][j][lb_idx:ub_idx])
- p[j][i] = _coherency_phase_delay(f[lb_idx:ub_idx],
- fxy[i][j][lb_idx:ub_idx].conjugate())
+ p[j][i] = _coherency_phase_delay(
+ f[lb_idx:ub_idx],
+ fxy[i][j][lb_idx:ub_idx].conjugate())
return f[lb_idx:ub_idx], p
@@ -826,7 +822,6 @@ def _coherency_phase_delay(f, fxy):
the phase delay (in sec) for each frequency band.
"""
-
return np.angle(fxy) / (2 * np.pi * f)
@@ -861,9 +856,7 @@ def correlation_spectrum(x1, x2, Fs=2 * np.pi, norm=False):
J Wendt, P A Turski, C H Moritz, M A Quigley, M E Meyerand (2000). Mapping
functionally related regions of brain with functional connectivity MR
imaging. AJNR American journal of neuroradiology 21:1636-44
-
"""
-
x1 = x1 - np.mean(x1)
x2 = x2 - np.mean(x2)
x1_f = fftpack.fft(x1)
@@ -876,18 +869,18 @@ def correlation_spectrum(x1, x2, Fs=2 * np.pi, norm=False):
(D * n))
if norm:
- ccn = ccn / np.sum(ccn) * 2 # Only half of the sum is sent back
- # because of the freq domain symmetry.
- # XXX Does normalization make this
- # strictly positive?
+ # Only half of the sum is sent back because of the freq domain
+ # symmetry.
+ ccn = ccn / np.sum(ccn) * 2
+ # XXX Does normalization make this strictly positive?
f = utils.get_freqs(Fs, n)
- return f, ccn[0:(n / 2 + 1)]
+ return f, ccn[0:(n // 2 + 1)]
-#------------------------------------------------------------------------
-#Coherency calculated using cached spectra
-#------------------------------------------------------------------------
+# -----------------------------------------------------------------------
+# Coherency calculated using cached spectra
+# -----------------------------------------------------------------------
"""The idea behind this set of functions is to keep a cache of the windowed fft
calculations of each time-series in a massive collection of time-series, so
that this calculation doesn't have to be repeated each time a cross-spectrum is
@@ -898,8 +891,8 @@ created by coherence"""
def cache_fft(time_series, ij, lb=0, ub=None,
- method=None, prefer_speed_over_memory=False,
- scale_by_freq=True):
+ method=None, prefer_speed_over_memory=False,
+ scale_by_freq=True):
"""compute and cache the windowed FFTs of the time_series, in such a way
that computing the psd and csd of any combination of them can be done
quickly.
@@ -957,7 +950,7 @@ def cache_fft(time_series, ij, lb=0, ub=None,
raise ValueError(e_s)
time_series = utils.zero_pad(time_series, NFFT)
- #The shape of the zero-padded version:
+ # The shape of the zero-padded version:
n_channels, n_time_points = time_series.shape
# get all the unique channels in time_series that we are interested in by
@@ -973,36 +966,36 @@ def cache_fft(time_series, ij, lb=0, ub=None,
else:
n_freqs = NFFT // 2 + 1
- #Which frequencies
+ # Which frequencies
freqs = utils.get_freqs(Fs, NFFT)
- #If there are bounds, limit the calculation to within that band,
- #potentially include the DC component:
+ # If there are bounds, limit the calculation to within that band,
+ # potentially include the DC component:
lb_idx, ub_idx = utils.get_bounds(freqs, lb, ub)
n_freqs = ub_idx - lb_idx
- #Make the window:
- if mlab.cbook.iterable(window):
+ # Make the window:
+ if np.iterable(window):
assert(len(window) == NFFT)
window_vals = window
else:
window_vals = window(np.ones(NFFT, time_series.dtype))
- #Each fft needs to be normalized by the square of the norm of the window
- #and, for consistency with newer versions of mlab.csd (which, in turn, are
- #consistent with Matlab), normalize also by the sampling rate:
+ # Each fft needs to be normalized by the square of the norm of the window
+ # and, for consistency with newer versions of mlab.csd (which, in turn, are
+ # consistent with Matlab), normalize also by the sampling rate:
if scale_by_freq:
- #This is the normalization factor for one-sided estimation, taking into
- #account the sampling rate. This makes the PSD a density function, with
- #units of dB/Hz, so that integrating over frequencies gives you the RMS
- #(XXX this should be in the tests!).
+ # This is the normalization factor for one-sided estimation, taking
+ # into account the sampling rate. This makes the PSD a density
+ # function, with units of dB/Hz, so that integrating over
+ # frequencies gives you the RMS. (XXX this should be in the tests!).
norm_val = (np.abs(window_vals) ** 2).sum() * (Fs / 2)
else:
norm_val = (np.abs(window_vals) ** 2).sum() / 2
- # cache the FFT of every windowed, detrended NFFT length segement
+ # cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If prefer_speed_over_memory, cache the conjugate
# as well
@@ -1012,16 +1005,14 @@ def cache_fft(time_series, ij, lb=0, ub=None,
FFT_conj_slices = {}
for i_channel in all_channels:
- #dbg:
- #print i_channel
- Slices = np.zeros((n_slices, n_freqs), dtype=np.complex)
+ Slices = np.zeros((n_slices, n_freqs), dtype=complex)
for iSlice in range(n_slices):
thisSlice = time_series[i_channel,
i_times[iSlice]:i_times[iSlice] + NFFT]
- #Windowing:
+ # Windowing:
thisSlice = window_vals * thisSlice # No detrending
- #Derive the fft for that slice:
+ # Derive the fft for that slice:
Slices[iSlice, :] = (fftpack.fft(thisSlice)[lb_idx:ub_idx])
FFT_slices[i_channel] = Slices
@@ -1068,15 +1059,13 @@ def cache_to_psd(cache, ij):
all_channels.add(j)
for i in all_channels:
- #dbg:
- #print i
- #If we made the conjugate slices:
+ # If we made the conjugate slices:
if FFT_conj_slices:
Pxx[i] = FFT_slices[i] * FFT_conj_slices[i]
else:
Pxx[i] = FFT_slices[i] * np.conjugate(FFT_slices[i])
- #If there is more than one window
+ # If there is more than one window
if FFT_slices[i].shape[0] > 1:
Pxx[i] = np.mean(Pxx[i], 0)
@@ -1123,7 +1112,7 @@ def cache_to_phase(cache, ij):
for i in all_channels:
Phase[i] = np.angle(FFT_slices[i])
- #If there is more than one window, average over all the windows:
+ # If there is more than one window, average over all the windows:
if FFT_slices[i].shape[0] > 1:
Phase[i] = np.mean(Phase[i], 0)
@@ -1171,10 +1160,10 @@ def cache_to_relative_phase(cache, ij):
channels_i = max(1, max(ij_array[:, 0]) + 1)
channels_j = max(1, max(ij_array[:, 1]) + 1)
- #Pre-allocate for speed:
- Phi_xy = np.zeros((channels_i, channels_j, freqs), dtype=np.complex)
+ # Pre-allocate for speed:
+ Phi_xy = np.zeros((channels_i, channels_j, freqs), dtype=complex)
- #These checks take time, so do them up front, not in every iteration:
+ # These checks take time, so do them up front, not in every iteration:
if list(FFT_slices.items())[0][1].shape[0] > 1:
if FFT_conj_slices:
for i, j in ij:
@@ -1232,7 +1221,7 @@ def cache_to_coherency(cache, ij):
channels_i = max(1, max(ij_array[:, 0]) + 1)
channels_j = max(1, max(ij_array[:, 1]) + 1)
- Cxy = np.zeros((channels_i, channels_j, freqs), dtype=np.complex)
+ Cxy = np.zeros((channels_i, channels_j, freqs), dtype=complex)
#These checks take time, so do them up front, not in every iteration:
if list(FFT_slices.items())[0][1].shape[0] > 1:
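As a rough illustration of the cached-spectra workflow described in the
docstrings above (a sketch only; it assumes the default Welch parameters and
the return values suggested by the test-suite)::

    import numpy as np
    import nitime.algorithms as tsa

    data = np.random.randn(2, 1024)       # two channels of fake data
    ij = [(0, 1)]                         # the channel pairs of interest

    # Window and FFT every channel once, then reuse the cache:
    freqs, cache = tsa.cache_fft(data, ij)
    psd = tsa.cache_to_psd(cache, ij)          # per-channel power spectra
    cxy = tsa.cache_to_coherency(cache, ij)    # coherency for each (i, j) pair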
diff --git a/nitime/algorithms/entropy.py b/nitime/algorithms/entropy.py
new file mode 100644
index 0000000..c0bd387
--- /dev/null
+++ b/nitime/algorithms/entropy.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+
+import itertools
+import numpy as np
+
+
+def entropy(*X):
+ """
+ Calculate the entropy of a variable, or joint entropy of several variables.
+
+ Parameters
+ ----------
+ X : array, or list of arrays
+ Variable or variables to compute entropy/joint entropy on
+
+ Notes
+ -----
+ This function can be used to calculate the entropy of a single variable
+ (provided as a single input) or to calculate the joint entropy between two
+ variables (provided as a series of inputs)
+ """
+ n_instances = len(X[0])
+ H = 0
+ for classes in itertools.product(*[set(x) for x in X]):
+ v = np.array([True] * n_instances)
+ for predictions, c in zip(X, classes):
+ v = np.logical_and(v, predictions == c)
+ p = np.mean(v)
+ H += -p * np.log2(p) if p > 0 else 0
+ return H
+
+
+def conditional_entropy(x, y):
+ """
+ The conditional entropy H(X|Y) = H(Y,X) - H(Y). X conditioned on Y
+ """
+ H_y = entropy(y)
+ H_yx = entropy(y, x)
+ return H_yx - H_y
+
+
+def mutual_information(x, y):
+ """
+ The mutual information between two variables
+
+ MI(X, Y) = H(X) + H(Y) - H(X | Y)
+
+ Parameters
+ ----------
+ x, y : array
+
+ Returns
+ -------
+ array : mutual information between x and y
+ """
+ H_x = entropy(x)
+ H_y = entropy(y)
+ H_xy = entropy(x, y)
+ return H_x + H_y - H_xy
+
+
+def entropy_cc(x, y):
+ """
+ The entropy correlation coefficient:
+
+ p(H) = sqrt(MI(X, Y) / 0.5 * (H(X) + H(Y)))
+ """
+ H_x = entropy(x)
+ H_y = entropy(y)
+ I_xy = mutual_information(y, x)
+ return np.sqrt(I_xy / (0.5 * (H_x + H_y)))
+
+
+def transfer_entropy(x, y, lag=1):
+ """
+ Transfer entropy for two given signals.
+
+ Parameters
+ ----------
+ x : array
+ source
+ y : array
+ target
+ lag : int
+
+ Returns
+ -------
+ array : Transfer entropy from x to y
+ """
+ # Future of i
+ Fi = np.roll(x, -lag)
+ # Past of i
+ Pi = x
+ # Past of j
+ Pj = y
+
+ # Transfer entropy
+ Inf_from_Pi_to_Fi = conditional_entropy(Fi, Pi)
+
+ # Same as cond_entropy(Fi, Pi_Pj)
+ H_y = entropy(Pi, Pj)
+ H_yx = entropy(Fi, Pj, Pi)
+ Inf_from_Pi_Pj_to_Fi = H_yx - H_y
+
+ TE_from_j_to_i = Inf_from_Pi_to_Fi - Inf_from_Pi_Pj_to_Fi
+
+ return TE_from_j_to_i
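The new entropy module can be exercised on discrete sequences; a short sketch,
mirroring the tests added further below::

    import numpy as np
    from nitime.algorithms.entropy import (entropy, conditional_entropy,
                                           mutual_information)

    x = np.random.randint(0, 2, size=1000)   # a fair binary variable
    y = np.random.randint(0, 2, size=1000)

    print(entropy(x))                  # close to 1 bit
    print(entropy(x, y))               # joint entropy, close to 2 bits
    print(conditional_entropy(x, x))   # 0: x carries no news given x
    print(mutual_information(x, y))    # close to 0 for independent variables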
diff --git a/nitime/algorithms/event_related.py b/nitime/algorithms/event_related.py
index be6f86e..f34be99 100644
--- a/nitime/algorithms/event_related.py
+++ b/nitime/algorithms/event_related.py
@@ -29,7 +29,7 @@ def fir(timeseries, design):
where A is a (number of TRs) x (length of HRF) matrix with a unity
matrix placed with its top left corner placed in each TR in which
- event of type A occured in the design. B is the equivalent for
+ event of type A occurred in the design. B is the equivalent for
events of type B, etc.
Returns
@@ -55,10 +55,7 @@ def fir(timeseries, design):
and Unbiased Approach. Human Brain Mapping, 11:249-260
"""
- X = np.matrix(design)
- y = np.matrix(timeseries)
- h = np.array(linalg.pinv(X.T * X) * X.T * y.T)
- return h
+    return linalg.pinv(design.T @ design) @ design.T @ timeseries.T
def freq_domain_xcorr(tseries, events, t_before, t_after, Fs=1):
@@ -98,8 +95,8 @@ def freq_domain_xcorr(tseries, events, t_before, t_after, Fs=1):
xcorr = np.real(fftshift(ifft(fft(tseries) *
fft(np.fliplr([events])))))
- return xcorr[0][np.ceil(len(xcorr[0]) / 2) - t_before * Fs:
- np.ceil(len(xcorr[0]) / 2) + t_after / 2 * Fs] / np.sum(events)
+ return xcorr[0][int(np.ceil(len(xcorr[0]) // 2) - t_before * Fs):
+ int(np.ceil(len(xcorr[0]) // 2) + t_after // 2 * Fs)] / np.sum(events)
def freq_domain_xcorr_zscored(tseries, events, t_before, t_after, Fs=1):
@@ -144,7 +141,7 @@ def freq_domain_xcorr_zscored(tseries, events, t_before, t_after, Fs=1):
meanSurr = np.mean(xcorr)
stdSurr = np.std(xcorr)
- return (((xcorr[0][np.ceil(len(xcorr[0]) / 2) - t_before * Fs:
- np.ceil(len(xcorr[0]) / 2) + t_after * Fs])
- - meanSurr)
- / stdSurr)
+ return (((xcorr[0][int(np.ceil(len(xcorr[0]) // 2) - t_before * Fs):
+ int(np.ceil(len(xcorr[0]) // 2) + t_after * Fs)]) -
+ meanSurr) /
+ stdSurr)
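The matrix expression used by ``fir`` above is ordinary least squares on an
FIR "stick-function" design; a hedged, self-contained sketch (the design
construction, onsets and shapes here are illustrative, not nitime's own
helpers)::

    import numpy as np
    from scipy import linalg

    n_trs, hrf_len = 100, 10
    onsets = np.zeros(n_trs)
    onsets[[5, 30, 62]] = 1           # hypothetical event onsets
    # Shifted copies of the onset vector form the FIR design matrix
    # (np.roll wraps around; good enough for a sketch):
    design = np.column_stack([np.roll(onsets, k) for k in range(hrf_len)])

    timeseries = np.random.randn(1, n_trs)
    h = linalg.pinv(design.T @ design) @ design.T @ timeseries.T
    print(h.shape)                    # (hrf_len, 1): the estimated response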
diff --git a/nitime/algorithms/filter.py b/nitime/algorithms/filter.py
index 829600a..b988836 100644
--- a/nitime/algorithms/filter.py
+++ b/nitime/algorithms/filter.py
@@ -33,7 +33,7 @@ def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):
n = time_series.shape[-1]
len_boxcar_ub = np.ceil(1 / (2.0 * ub))
- boxcar_ub = np.empty(len_boxcar_ub)
+ boxcar_ub = np.empty(int(len_boxcar_ub))
boxcar_ub.fill(1.0 / len_boxcar_ub)
boxcar_ones_ub = np.ones_like(boxcar_ub)
@@ -41,7 +41,7 @@ def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):
lb = None
else:
len_boxcar_lb = np.ceil(1 / (2.0 * lb))
- boxcar_lb = np.empty(len_boxcar_lb)
+ boxcar_lb = np.empty(int(len_boxcar_lb))
boxcar_lb.fill(1.0 / len_boxcar_lb)
boxcar_ones_lb = np.ones_like(boxcar_lb)
@@ -51,24 +51,26 @@ def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):
time_series = np.array([time_series])
for i in range(time_series.shape[0]):
if ub:
- #Start by applying a low-pass to the signal. Pad the signal on
- #each side with the initial and terminal signal value:
+ # Start by applying a low-pass to the signal. Pad the signal on
+ # each side with the initial and terminal signal value:
pad_s = np.hstack((boxcar_ones_ub *
time_series[i, 0], time_series[i]))
pad_s = np.hstack((pad_s, boxcar_ones_ub * time_series[i, -1]))
- #Filter operation is a convolution with the box-car(iterate,
- #n_iterations times over this operation):
+ # Filter operation is a convolution with the box-car(iterate,
+ # n_iterations times over this operation):
for iteration in range(n_iterations):
conv_s = np.convolve(pad_s, boxcar_ub)
- #Extract the low pass signal by excising the central
- #len(time_series) points:
- time_series[i] = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):
- conv_s.shape[-1] / 2 + np.ceil(n / 2.)]
+ # Extract the low pass signal by excising the central
+ # len(time_series) points:
+ time_series[i] = conv_s[conv_s.shape[-1] // 2 -
+ int(np.floor(n / 2.)):
+ conv_s.shape[-1] // 2 +
+ int(np.ceil(n / 2.))]
- #Now, if there is a high-pass, do the same, but in the end subtract out
- #the low-passed signal:
+ # Now, if there is a high-pass, do the same, but in the end subtract
+ # out the low-passed signal:
if lb:
pad_s = np.hstack((boxcar_ones_lb *
time_series[i, 0], time_series[i]))
@@ -81,8 +83,8 @@ def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):
#Extract the low pass signal by excising the central
#len(time_series) points:
- s_lp = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):
- conv_s.shape[-1] / 2 + np.ceil(n / 2.)]
+ s_lp = conv_s[conv_s.shape[-1] // 2 - int(np.floor(n / 2.)):
+ conv_s.shape[-1] // 2 + int(np.ceil(n / 2.))]
#Extract the high pass signal simply by subtracting the high pass
#signal from the original signal:
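To make the boxcar logic above concrete, here is a simplified,
single-iteration sketch of the low-pass/high-pass split (the cutoff value and
signal are made up)::

    import numpy as np

    n = 256
    sig = np.random.randn(n)
    ub = 0.1                                    # normalized upper cutoff
    len_boxcar = int(np.ceil(1 / (2.0 * ub)))
    boxcar = np.ones(len_boxcar) / len_boxcar

    # Pad with the edge values, convolve, excise the central n points:
    padded = np.r_[np.ones(len_boxcar) * sig[0], sig,
                   np.ones(len_boxcar) * sig[-1]]
    conv = np.convolve(padded, boxcar)
    low = conv[conv.shape[-1] // 2 - n // 2:
               conv.shape[-1] // 2 + int(np.ceil(n / 2.))]
    high = sig - low                            # the subtraction used for lb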
diff --git a/nitime/algorithms/spectral.py b/nitime/algorithms/spectral.py
index e240f5a..d21400b 100644
--- a/nitime/algorithms/spectral.py
+++ b/nitime/algorithms/spectral.py
@@ -15,6 +15,7 @@ from nitime.lazy import scipy_interpolate as interpolate
from nitime.lazy import scipy_fftpack as fftpack
import nitime.utils as utils
+from nitime.utils import tapered_spectra, dpss_windows
# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices, triu_indices
@@ -23,7 +24,7 @@ from nitime.index_utils import tril_indices, triu_indices
# Set global variables for the default NFFT to be used in spectral analysis and
# the overlap:
default_nfft = 64
-default_n_overlap = int(np.ceil(default_nfft / 2.0))
+default_n_overlap = int(np.ceil(default_nfft // 2))
def get_spectra(time_series, method=None):
r"""
@@ -104,14 +105,14 @@ def get_spectra(time_series, method=None):
Fs = method.get('Fs', 2 * np.pi)
detrend = method.get('detrend', mlab.detrend_none)
window = method.get('window', mlab.window_hanning)
- n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2.0)))
+ n_overlap = method.get('n_overlap', int(np.ceil(NFFT // 2)))
# The length of the spectrum depends on how many sides are taken, which
# depends on whether or not this is a complex object:
if np.iscomplexobj(time_series):
fxy_len = NFFT
else:
- fxy_len = NFFT / 2.0 + 1
+ fxy_len = NFFT // 2 + 1
# If there is only 1 channel in the time-series:
if len(time_series.shape) == 1 or time_series.shape[0] == 1:
@@ -119,8 +120,7 @@ def get_spectra(time_series, method=None):
NFFT, Fs, detrend, window, n_overlap,
scale_by_freq=True)
- fxy = temp.squeeze() # the output of mlab.csd has a weird
- # shape
+ fxy = temp.squeeze() # the output of mlab.csd has a weird shape
else:
fxy = np.zeros((time_series.shape[0],
time_series.shape[0],
@@ -244,12 +244,13 @@ def periodogram(s, Fs=2 * np.pi, Sk=None, N=None,
if sides == 'onesided':
# putative Nyquist freq
- Fn = N / 2 + 1
+ Fn = N // 2 + 1
# last duplicate freq
- Fl = (N + 1) / 2
+ Fl = (N + 1) // 2
pshape[-1] = Fn
P = np.zeros(pshape, 'd')
- freqs = np.linspace(0, Fs / 2, Fn)
+ #freqs = np.linspace(0, Fs // 2, Fn)
+ freqs = np.fft.rfftfreq(N) * Fs
P[..., 0] = (Sk[..., 0] * Sk[..., 0].conj()).real
P[..., 1:Fl] = 2 * (Sk[..., 1:Fl] * Sk[..., 1:Fl].conj()).real
if Fn > Fl:
@@ -307,7 +308,7 @@ def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None, sides='default',
"""
s_shape = s.shape
- s.shape = (np.prod(s_shape[:-1]), s_shape[-1])
+ s.shape = (-1, s_shape[-1])
# defining an Sk_loc is a little opaque, but it avoids having to
# reset the shape of any user-given Sk later on
if Sk is not None:
@@ -333,9 +334,9 @@ def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None, sides='default',
if sides == 'onesided':
# putative Nyquist freq
- Fn = N / 2 + 1
+ Fn = N // 2 + 1
# last duplicate freq
- Fl = (N + 1) / 2
+ Fl = (N + 1) // 2
csd_pairs = np.zeros((M, M, Fn), 'D')
freqs = np.linspace(0, Fs / 2, Fn)
for i in range(M):
@@ -364,202 +365,6 @@ def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None, sides='default',
return freqs, csd_mat
-def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind='linear'):
- """
- Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
- for a given frequency-spacing multiple NW and sequence length N.
-
- Parameters
- ----------
- N : int
- sequence length
- NW : float, unitless
- standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
- but with dt taken as 1
- Kmax : int
- number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
- interp_from : int (optional)
- The dpss can be calculated using interpolation from a set of dpss
- with the same NW and Kmax, but shorter N. This is the length of this
- shorter set of dpss windows.
- interp_kind : str (optional)
- This input variable is passed to scipy.interpolate.interp1d and
- specifies the kind of interpolation as a string ('linear', 'nearest',
- 'zero', 'slinear', 'quadratic, 'cubic') or as an integer specifying the
- order of the spline interpolator to use.
-
-
- Returns
- -------
- v, e : tuple,
- v is an array of DPSS windows shaped (Kmax, N)
- e are the eigenvalues
-
- Notes
- -----
- Tridiagonal form of DPSS calculation from:
-
- Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
- uncertainty V: The discrete case. Bell System Technical Journal,
- Volume 57 (1978), 1371430
- """
- Kmax = int(Kmax)
- W = float(NW) / N
- nidx = np.arange(N, dtype='d')
-
- # In this case, we create the dpss windows of the smaller size
- # (interp_from) and then interpolate to the larger size (N)
- if interp_from is not None:
- if interp_from > N:
- e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
- e_s += 'and N is: %s. ' % N
- e_s += 'Please enter interp_from smaller than N.'
- raise ValueError(e_s)
- dpss = []
- d, e = dpss_windows(interp_from, NW, Kmax)
- for this_d in d:
- x = np.arange(this_d.shape[-1])
- I = interpolate.interp1d(x, this_d, kind=interp_kind)
- d_temp = I(np.arange(0, this_d.shape[-1] - 1,
- float(this_d.shape[-1] - 1) / N))
-
- # Rescale:
- d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
-
- dpss.append(d_temp)
-
- dpss = np.array(dpss)
-
- else:
- # here we want to set up an optimization problem to find a sequence
- # whose energy is maximally concentrated within band [-W,W].
- # Thus, the measure lambda(T,W) is the ratio between the energy within
- # that band, and the total energy. This leads to the eigen-system
- # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
- # eigenvalue is the sequence with maximally concentrated energy. The
- # collection of eigenvectors of this system are called Slepian
- # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
- # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
- # concentration
- # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
-
- # Here I set up an alternative symmetric tri-diagonal eigenvalue
- # problem such that
- # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
- # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
- # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
- # [see Percival and Walden, 1993]
- diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
- off_diag = np.zeros_like(nidx)
- off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
- # put the diagonals in LAPACK "packed" storage
- ab = np.zeros((2, N), 'd')
- ab[1] = diagonal
- ab[0, 1:] = off_diag[:-1]
- # only calculate the highest Kmax eigenvalues
- w = linalg.eigvals_banded(ab, select='i',
- select_range=(N - Kmax, N - 1))
- w = w[::-1]
-
- # find the corresponding eigenvectors via inverse iteration
- t = np.linspace(0, np.pi, N)
- dpss = np.zeros((Kmax, N), 'd')
- for k in range(Kmax):
- dpss[k] = utils.tridi_inverse_iteration(
- diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)
- )
-
- # By convention (Percival and Walden, 1993 pg 379)
- # * symmetric tapers (k=0,2,4,...) should have a positive average.
- # * antisymmetric tapers should begin with a positive lobe
- fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
- for i, f in enumerate(fix_symmetric):
- if f:
- dpss[2 * i] *= -1
- # rather than test the sign of one point, test the sign of the
- # linear slope up to the first (largest) peak
- pk = np.argmax( np.abs(dpss[1::2, :N/2]), axis=1 )
- for i, p in enumerate(pk):
- if np.sum(dpss[2 * i + 1, :p]) < 0:
- dpss[2 * i + 1] *= -1
-
- # Now find the eigenvalues of the original spectral concentration problem
- # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
- dpss_rxx = utils.autocorr(dpss) * N
- r = 4 * W * np.sinc(2 * W * nidx)
- r[0] = 2 * W
- eigvals = np.dot(dpss_rxx, r)
-
- return dpss, eigvals
-
-def tapered_spectra(s, tapers, NFFT=None, low_bias=True):
- """
- Compute the tapered spectra of the rows of s.
-
- Parameters
- ----------
-
- s : ndarray, (n_arr, n_pts)
- An array whose rows are timeseries.
-
- tapers : ndarray or container
- Either the precomputed DPSS tapers, or the pair of parameters
- (NW, K) needed to compute K tapers of length n_pts.
-
- NFFT : int
- Number of FFT bins to compute
-
- low_bias : Boolean
- If compute DPSS, automatically select tapers corresponding to
- > 90% energy concentration.
-
- Returns
- -------
-
- t_spectra : ndarray, shaped (n_arr, K, NFFT)
- The FFT of the tapered sequences in s. First dimension is squeezed
- out if n_arr is 1.
- eigvals : ndarray
- The eigenvalues are also returned if DPSS are calculated here.
-
- """
- N = s.shape[-1]
- # XXX: don't allow NFFT < N -- not every implementation is so restrictive!
- if NFFT is None or NFFT < N:
- NFFT = N
- rest_of_dims = s.shape[:-1]
- M = int(np.product(rest_of_dims))
-
- s = s.reshape(int(np.product(rest_of_dims)), N)
- # de-mean this sucker
- s = utils.remove_bias(s, axis=-1)
-
- if not isinstance(tapers, np.ndarray):
- # then tapers is (NW, K)
- args = (N,) + tuple(tapers)
- dpss, eigvals = dpss_windows(*args)
- if low_bias:
- keepers = (eigvals > 0.9)
- dpss = dpss[keepers]
- eigvals = eigvals[keepers]
- tapers = dpss
- else:
- eigvals = None
- K = tapers.shape[0]
- sig_sl = [slice(None)] * len(s.shape)
- sig_sl.insert(len(s.shape) - 1, np.newaxis)
-
- # tapered.shape is (M, Kmax, N)
- tapered = s[sig_sl] * tapers
-
- # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking
- # results of real-valued FFT eats up memory
- t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)
- t_spectra.shape = rest_of_dims + (K, NFFT)
- if eigvals is None:
- return t_spectra
- return t_spectra, eigvals
-
def mtm_cross_spectrum(tx, ty, weights, sides='twosided'):
r"""
@@ -617,7 +422,7 @@ def mtm_cross_spectrum(tx, ty, weights, sides='twosided'):
if sides == 'onesided':
# where the nyq freq should be
- Fn = N / 2 + 1
+ Fn = N // 2 + 1
truncated_slice = [slice(None)] * len(tx.shape)
truncated_slice[-1] = slice(0, Fn)
tsl = tuple(truncated_slice)
@@ -636,7 +441,7 @@ def mtm_cross_spectrum(tx, ty, weights, sides='twosided'):
if sides == 'onesided':
# dbl power at duplicated freqs
- Fl = (N + 1) / 2
+ Fl = (N + 1) // 2
sub_slice = [slice(None)] * len(sf.shape)
sub_slice[-1] = slice(1, Fl)
sf[tuple(sub_slice)] *= 2
@@ -718,7 +523,7 @@ def multi_taper_psd(
"""
# have last axis be time series for now
N = s.shape[-1]
- M = int(np.product(s.shape[:-1]))
+ M = int(np.prod(s.shape[:-1]))
if BW is not None:
# BW wins in a contest (since it was the original implementation)
@@ -747,7 +552,7 @@ def multi_taper_psd(
# collapse spectra's shape back down to 3 dimensions
spectra.shape = (M, K, NFFT)
- last_freq = NFFT / 2 + 1 if sides == 'onesided' else NFFT
+ last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT
# degrees of freedom at each timeseries, at each freq
nu = np.empty((M, last_freq))
@@ -781,9 +586,9 @@ def multi_taper_psd(
spectra, spectra, weights, sides=sides
)
sdf_est /= Fs
-
+
if sides == 'onesided':
- freqs = np.linspace(0, Fs / 2, NFFT / 2 + 1)
+ freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
else:
freqs = np.linspace(0, Fs, NFFT, endpoint=False)
@@ -858,7 +663,7 @@ def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,
"""
# have last axis be time series for now
N = s.shape[-1]
- M = int(np.product(s.shape[:-1]))
+ M = int(np.prod(s.shape[:-1]))
if BW is not None:
# BW wins in a contest (since it was the original implementation)
@@ -888,7 +693,7 @@ def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,
spectra.shape = (M, K, NFFT)
# compute the cross-spectral density functions
- last_freq = NFFT / 2 + 1 if sides == 'onesided' else NFFT
+ last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT
if adaptive:
w = np.empty((M, K, last_freq))
@@ -920,9 +725,9 @@ def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,
diag_idc = (np.arange(M), np.arange(M))
csdfs[diag_idc] /= 2
csdfs /= Fs
-
+
if sides == 'onesided':
- freqs = np.linspace(0, Fs / 2, NFFT / 2 + 1)
+ freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
else:
freqs = np.linspace(0, Fs, NFFT, endpoint=False)
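With ``dpss_windows`` and ``tapered_spectra`` now imported from
:mod:`nitime.utils`, the multitaper entry points remain available through
:mod:`nitime.algorithms`; a hedged usage sketch (the three-element return of
``multi_taper_psd`` is assumed here rather than shown in this hunk)::

    import numpy as np
    import nitime.algorithms as tsa

    N, NW, K = 1024, 4, 7
    tapers, eigvals = tsa.dpss_windows(N, NW, K)   # (K, N) tapers, eigenvalues
    print(tapers.shape, (eigvals > 0.9).sum())

    x = np.random.randn(N)
    # Assumed return: one-sided frequencies, PSD estimate, degrees of freedom
    f, psd, nu = tsa.multi_taper_psd(x, Fs=2 * np.pi, NW=NW)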
diff --git a/nitime/algorithms/tests/test_autoregressive.py b/nitime/algorithms/tests/test_autoregressive.py
index 3eb2027..294de3d 100644
--- a/nitime/algorithms/tests/test_autoregressive.py
+++ b/nitime/algorithms/tests/test_autoregressive.py
@@ -1,6 +1,5 @@
import numpy as np
import numpy.testing as npt
-import numpy.testing.decorators as dec
import nitime.algorithms as tsa
import nitime.utils as utils
@@ -18,7 +17,7 @@ def _random_poles(half_poles=3):
stable_pole_idx = np.where(poles_rp ** 2 + poles_ip ** 2 < .75 ** 2)[0]
# keep 3 of these, and supplement with complex conjugate
stable_poles = poles_rp[stable_pole_idx[:half_poles]] + \
- 1j * poles_ip[stable_pole_idx[:half_poles]]
+ 1j * poles_ip[stable_pole_idx[:half_poles]]
stable_poles = np.r_[stable_poles, stable_poles.conj()]
# we have the roots, now find the polynomial
ak = np.poly(stable_poles)
@@ -27,7 +26,7 @@ def _random_poles(half_poles=3):
def test_AR_est_consistency():
order = 10 # some even number
- ak = _random_poles(order / 2)
+ ak = _random_poles(order // 2)
x, v, _ = utils.ar_generator(N=512, coefs=-ak[1:], drop_transients=100)
ak_yw, ssq_yw = tsa.AR_est_YW(x, order)
ak_ld, ssq_ld = tsa.AR_est_LD(x, order)
@@ -87,7 +86,6 @@ def test_AR_LD():
npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)
-@dec.slow
def test_MAR_est_LWR():
"""
diff --git a/nitime/algorithms/tests/test_coherence.py b/nitime/algorithms/tests/test_coherence.py
index 2cb325f..e0e878e 100644
--- a/nitime/algorithms/tests/test_coherence.py
+++ b/nitime/algorithms/tests/test_coherence.py
@@ -3,31 +3,27 @@
Tests of functions under algorithms.coherence
"""
-import nose
import os
import warnings
import numpy as np
import numpy.testing as npt
-from scipy.signal import signaltools
-
-try:
- import matplotlib
- import matplotlib.mlab as mlab
- has_mpl = True
- # Matplotlib older than 0.99 will have some issues with the normalization
- # of t:
- if float(matplotlib.__version__[:3]) < 0.99:
- w_s = "You have a relatively old version of Matplotlib. "
- w_s += " Estimation of the PSD DC component might not be as expected."
- w_s +=" Consider updating Matplotlib: http://matplotlib.sourceforge.net/"
- warnings.warn(w_s, Warning)
- old_mpl = True
- else:
- old_mpl = False
-
-except ImportError:
- raise nose.SkipTest()
+from scipy import signal
+import pytest
+
+import matplotlib
+import matplotlib.mlab as mlab
+has_mpl = True
+# Matplotlib older than 0.99 will have some issues with the normalization
+# of t:
+if float(matplotlib.__version__[:3]) < 0.99:
+ w_s = "You have a relatively old version of Matplotlib. "
+ w_s += " Estimation of the PSD DC component might not be as expected."
+ w_s +=" Consider updating Matplotlib: http://matplotlib.sourceforge.net/"
+ warnings.warn(w_s, Warning)
+ old_mpl = True
+else:
+ old_mpl = False
from scipy import fftpack
@@ -70,7 +66,6 @@ def test_coherency():
if method is not None and method['this_method'] != "multi_taper_csd":
f_theoretical = utils.get_freqs(method['Fs'], method['NFFT'])
npt.assert_array_almost_equal(f, f_theoretical)
- npt.assert_array_almost_equal(f, f_theoretical)
def test_coherence():
@@ -206,9 +201,9 @@ def test_correlation_spectrum():
# XXX FIXME: http://github.com/nipy/nitime/issues/issue/1
-@npt.dec.skipif(True)
+@pytest.mark.skipif(True, reason="http://github.com/nipy/nitime/issues/issue/1")
def test_coherence_linear_dependence():
- """
+ r"""
Tests that the coherence between two linearly dependent time-series
behaves as expected.
@@ -225,8 +220,8 @@ def test_coherence_linear_dependence():
"""
t = np.linspace(0, 16 * np.pi, 2 ** 14)
- x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + \
- 0.1 * np.random.rand(t.shape[-1])
+ x = (np.sin(t) + np.sin(2 * t) + np.sin(3 * t) +
+ 0.1 * np.random.rand(t.shape[-1]))
N = x.shape[-1]
alpha = 10
@@ -234,8 +229,8 @@ def test_coherence_linear_dependence():
noise = 0.1 * np.random.randn(t.shape[-1])
y = alpha * np.roll(x, m) + noise
- f_noise = fftpack.fft(noise)[0:N / 2]
- f_x = fftpack.fft(x)[0:N / 2]
+ f_noise = fftpack.fft(noise)[0:N // 2]
+ f_x = fftpack.fft(x)[0:N // 2]
c_t = (1 / (1 + (f_noise / (f_x * (alpha ** 2)))))
@@ -244,7 +239,7 @@ def test_coherence_linear_dependence():
"Fs": 2 * np.pi}
f, c = tsa.coherence(np.vstack([x, y]), csd_method=method)
- c_t = np.abs(signaltools.resample(c_t, c.shape[-1]))
+ c_t = np.abs(signal.resample(c_t, c.shape[-1]))
npt.assert_array_almost_equal(c[0, 1], c_t, 2)
@@ -262,7 +257,7 @@ def test_coherence_matlab():
method['this_method'] = 'welch'
method['NFFT'] = 64
method['Fs'] = 1.0
- method['noverlap'] = method['NFFT'] / 2
+ method['noverlap'] = method['NFFT'] // 2
ttt = np.vstack([ts0, ts1])
f, cxy_mlab = tsa.coherence(ttt, csd_method=method)
@@ -270,7 +265,7 @@ def test_coherence_matlab():
npt.assert_almost_equal(cxy_mlab[0][1], cxy_matlab, decimal=5)
-@npt.dec.skipif(old_mpl)
+@pytest.mark.skipif(old_mpl, reason="MPL version before 0.99")
def test_cached_coherence():
"""Testing the cached coherence functions """
NFFT = 64 # This is the default behavior
@@ -300,7 +295,8 @@ def test_cached_coherence():
# Only welch PSD works and an error is thrown otherwise. This tests that
# the error is thrown:
- npt.assert_raises(ValueError, tsa.cache_fft, ts, ij, method=methods[2])
+ with pytest.raises(ValueError) as e_info:
+ tsa.cache_fft(ts, ij, method=methods[2])
# Take the method in which the window is defined on input:
freqs, cache1 = tsa.cache_fft(ts, ij, method=methods[3])
diff --git a/nitime/algorithms/tests/test_entropy.py b/nitime/algorithms/tests/test_entropy.py
new file mode 100644
index 0000000..aa4445c
--- /dev/null
+++ b/nitime/algorithms/tests/test_entropy.py
@@ -0,0 +1,49 @@
+import numpy as np
+import numpy.testing as npt
+
+import nitime.algorithms as tsa
+
+np.random.seed(1945)
+
+def test_entropy():
+ x = np.random.randint(0, 2, size=1000)
+ e1 = tsa.entropy(x)
+ npt.assert_almost_equal(e1, 1, decimal=2)
+ # The joint entropy of the variable with itself is the same:
+ e2 = tsa.entropy(x, x)
+ npt.assert_almost_equal(e1, e2)
+ y = np.random.randint(0, 2, size=1000)
+ # Joint entropy with another random variable is 2:
+ e3 = tsa.entropy(x, y)
+ npt.assert_almost_equal(e3, 2, decimal=2)
+
+
+def test_conditional_entropy():
+ x = np.random.randint(0, 2, size=1000)
+ y = np.random.randint(0, 2, size=1000)
+ e1 = tsa.conditional_entropy(x, x)
+ npt.assert_almost_equal(e1, 0)
+ e2 = tsa.conditional_entropy(x, y)
+ npt.assert_almost_equal(e2, 1, decimal=2)
+
+
+def test_mutual_information():
+ x = np.random.randint(0, 2, size=1000)
+ y = np.random.randint(0, 2, size=1000)
+ e1 = tsa.mutual_information(x, x)
+ npt.assert_almost_equal(e1, 1, decimal=2)
+ e2 = tsa.mutual_information(x, y)
+ npt.assert_almost_equal(e2, 0, decimal=2)
+
+
+def test_entropy_cc():
+ x = np.random.randint(0, 2, size=1000)
+ e1 = tsa.entropy_cc(x, x)
+ npt.assert_almost_equal(e1, 1, decimal=2)
+
+
+def test_transfer_entropy():
+ x = np.random.randint(0, 4, size=1000)
+ y = np.roll(x, -1)
+ e1 = tsa.transfer_entropy(x, y, lag=1)
+ npt.assert_almost_equal(e1, 2, decimal=1)
diff --git a/nitime/algorithms/tests/test_spectral.py b/nitime/algorithms/tests/test_spectral.py
index ad22390..566557e 100644
--- a/nitime/algorithms/tests/test_spectral.py
+++ b/nitime/algorithms/tests/test_spectral.py
@@ -7,8 +7,7 @@ import numpy as np
import scipy
from scipy import fftpack
import numpy.testing as npt
-import numpy.testing.decorators as dec
-import nose.tools as nt
+import pytest
import nitime.algorithms as tsa
import nitime.utils as utils
@@ -71,12 +70,12 @@ def test_get_spectra_complex():
r, _, _ = utils.ar_generator(N=2 ** 16) # It needs to be that long for
# the answers to converge
c, _, _ = utils.ar_generator(N=2 ** 16)
- arsig1 = r + c * scipy.sqrt(-1)
+ arsig1 = r + c * 1j
r, _, _ = utils.ar_generator(N=2 ** 16)
c, _, _ = utils.ar_generator(N=2 ** 16)
- arsig2 = r + c * scipy.sqrt(-1)
+ arsig2 = r + c * 1j
avg_pwr1.append((arsig1 * arsig1.conjugate()).mean())
avg_pwr2.append((arsig2 * arsig2.conjugate()).mean())
@@ -99,8 +98,8 @@ def test_get_spectra_unknown_method():
"""
tseries = np.array([[1, 2, 3], [4, 5, 6]])
- npt.assert_raises(ValueError,
- tsa.get_spectra, tseries, method=dict(this_method='foo'))
+ with pytest.raises(ValueError) as e_info:
+ tsa.get_spectra(tseries, method=dict(this_method='foo'))
def test_periodogram():
@@ -119,7 +118,7 @@ def test_periodogram():
N = 1024
r, _, _ = utils.ar_generator(N=N)
c, _, _ = utils.ar_generator(N=N)
- arsig = r + c * scipy.sqrt(-1)
+ arsig = r + c * 1j
f, c = tsa.periodogram(arsig)
npt.assert_equal(f.shape[0], N) # Should be N, not the one-sided N/2 + 1
@@ -144,11 +143,11 @@ def test_periodogram_csd():
N = 1024
r, _, _ = utils.ar_generator(N=N)
c, _, _ = utils.ar_generator(N=N)
- arsig1 = r + c * scipy.sqrt(-1)
+ arsig1 = r + c * 1j
r, _, _ = utils.ar_generator(N=N)
c, _, _ = utils.ar_generator(N=N)
- arsig2 = r + c * scipy.sqrt(-1)
+ arsig2 = r + c * 1j
tseries = np.vstack([arsig1, arsig2])
@@ -157,7 +156,7 @@ def test_periodogram_csd():
def test_dpss_windows():
- """ Test a funky corner case of DPSS_windows """
+ """ Test a couple of funky corner cases of DPSS_windows """
N = 1024
NW = 0 # Setting NW to 0 triggers the weird corner case in which some of
@@ -169,6 +168,11 @@ def test_dpss_windows():
for this_d in d[0::2]:
npt.assert_equal(this_d.sum(axis=-1) < 0, False)
+ # Make sure we interpolate to the proper number of points
+ d, w = tsa.dpss_windows(245411, 4, 8, 1000)
+ npt.assert_equal(d.shape[-1], 245411)
+
+
def test_dpss_properties():
""" Test conventions of Slepian eigenvectors """
@@ -176,24 +180,24 @@ def test_dpss_properties():
NW = 200
d, lam = tsa.dpss_windows(N, NW, 2*NW-2)
# 2NW-2 lamdas should be all > 0.9
- nt.assert_true(
+ npt.assert_(
(lam > 0.9).all(), 'Eigenvectors show poor spectral concentration'
)
# test orthonomality
err = np.linalg.norm(d.dot(d.T) - np.eye(2*NW-2), ord='fro')
- nt.assert_true(err**2 < 1e-16, 'Eigenvectors not numerically orthonormal')
+ npt.assert_(err**2 < 1e-16, 'Eigenvectors not numerically orthonormal')
# test positivity of even functions
- nt.assert_true(
+ npt.assert_(
(d[::2].sum(axis=1) > 0).all(),
'Even Slepian sequences should have positive DC'
)
# test positive initial slope of odd functions
# (this tests the sign of a linear slope)
- pk = np.argmax(np.abs(d[1::2, :N/2]), axis=1)
+ pk = np.argmax(np.abs(d[1::2, :N//2]), axis=1)
t = True
for p, f in zip(pk, d[1::2]):
t = t and np.sum( np.arange(1,p+1) * f[:p] ) >= 0
- nt.assert_true(t, 'Odd Slepians should begin positive-going')
+ npt.assert_(t, 'Odd Slepians should begin positive-going')
def test_get_spectra_bi():
"""
@@ -242,29 +246,29 @@ def test_mtm_lin_combo():
mtm_cross = tsa.mtm_cross_spectrum(
spec1, spec2, (weights[0], weights[1]), sides=sides
)
- nt.assert_true(mtm_cross.dtype in np.sctypes['complex'],
+ npt.assert_(mtm_cross.dtype in np.sctypes['complex'],
'Wrong dtype for crossspectrum')
- nt.assert_true(len(mtm_cross) == 51,
+ npt.assert_(len(mtm_cross) == 51,
'Wrong length for halfband spectrum')
sides = 'twosided'
mtm_cross = tsa.mtm_cross_spectrum(
spec1, spec2, (weights[0], weights[1]), sides=sides
)
- nt.assert_true(len(mtm_cross) == 100,
+ npt.assert_(len(mtm_cross) == 100,
'Wrong length for fullband spectrum')
sides = 'onesided'
mtm_auto = tsa.mtm_cross_spectrum(
spec1, spec1, weights[0], sides=sides
)
- nt.assert_true(mtm_auto.dtype in np.sctypes['float'],
+ npt.assert_(mtm_auto.dtype in np.sctypes['float'],
'Wrong dtype for autospectrum')
- nt.assert_true(len(mtm_auto) == 51,
+ npt.assert_(len(mtm_auto) == 51,
'Wrong length for halfband spectrum')
sides = 'twosided'
mtm_auto = tsa.mtm_cross_spectrum(
spec1, spec2, weights[0], sides=sides
)
- nt.assert_true(len(mtm_auto) == 100,
+ npt.assert_(len(mtm_auto) == 100,
'Wrong length for fullband spectrum')
@@ -311,13 +315,10 @@ def test_mtm_cross_spectrum():
npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)
# Test raising of error in case the inputs don't make sense:
- npt.assert_raises(ValueError,
- tsa.mtm_cross_spectrum,
- tspectra, np.r_[tspectra, tspectra],
- (w, w))
+ with pytest.raises(ValueError) as e_info:
+ tsa.mtm_cross_spectrum(tspectra, np.r_[tspectra, tspectra], (w, w))
-@dec.slow
def test_multi_taper_psd_csd():
"""
@@ -436,19 +437,19 @@ def test_periodogram_spectral_normalization():
p1 = np.sum(Xp1) * 2 * np.pi / 2**10
p2 = np.sum(Xp2) * 100 / 2**10
p3 = np.sum(Xp3) * 2 * np.pi / 2**12
- nt.assert_true( np.abs(p1 - p2) < 1e-14,
+ npt.assert_( np.abs(p1 - p2) < 1e-14,
'Inconsistent frequency normalization in periodogram (1)' )
- nt.assert_true( np.abs(p3 - p2) < 1e-8,
+ npt.assert_( np.abs(p3 - p2) < 1e-8,
'Inconsistent frequency normalization in periodogram (2)' )
td_var = np.var(x)
# assure that the estimators are at least in the same
# order of magnitude as the time-domain variance
- nt.assert_true( np.abs(np.log10(p1/td_var)) < 1,
+ npt.assert_( np.abs(np.log10(p1/td_var)) < 1,
'Incorrect frequency normalization in periodogram' )
# check the freq vector while we're here
- nt.assert_true( f2.max() == 50, 'Periodogram returns wrong frequency bins' )
+ npt.assert_( f2.max() == 50, 'Periodogram returns wrong frequency bins' )
def test_multitaper_spectral_normalization():
"""
@@ -464,16 +465,16 @@ def test_multitaper_spectral_normalization():
p1 = np.sum(Xp1) * 2 * np.pi / 2**10
p2 = np.sum(Xp2) * 100 / 2**10
p3 = np.sum(Xp3) * 2 * np.pi / 2**12
- nt.assert_true( np.abs(p1 - p2) < 1e-14,
+ npt.assert_( np.abs(p1 - p2) < 1e-14,
'Inconsistent frequency normalization in MTM PSD (1)' )
- nt.assert_true( np.abs(p3 - p2) < 1e-8,
+ npt.assert_( np.abs(p3 - p2) < 1e-8,
'Inconsistent frequency normalization in MTM PSD (2)' )
td_var = np.var(x)
# assure that the estimators are at least in the same
# order of magnitude as the time-domain variance
- nt.assert_true( np.abs(np.log10(p1/td_var)) < 1,
+ npt.assert_( np.abs(np.log10(p1/td_var)) < 1,
'Incorrect frequency normalization in MTM PSD' )
# check the freq vector while we're here
- nt.assert_true( f2.max() == 50, 'MTM PSD returns wrong frequency bins' )
+ npt.assert_( f2.max() == 50, 'MTM PSD returns wrong frequency bins' )
diff --git a/nitime/analysis/base.py b/nitime/analysis/base.py
index 2541c3e..d0ea92d 100644
--- a/nitime/analysis/base.py
+++ b/nitime/analysis/base.py
@@ -1,5 +1,5 @@
-from inspect import getargspec
+from inspect import getfullargspec
from nitime import descriptors as desc
@@ -16,7 +16,7 @@ class BaseAnalyzer(desc.ResetMixin):
@desc.setattr_on_read
def parameterlist(self):
- plist = getargspec(self.__init__).args
+ plist = getfullargspec(self.__init__).args
plist.remove('self')
plist.remove('input')
return plist
diff --git a/nitime/analysis/coherence.py b/nitime/analysis/coherence.py
index d66115e..787b4c6 100644
--- a/nitime/analysis/coherence.py
+++ b/nitime/analysis/coherence.py
@@ -8,7 +8,7 @@ from nitime import descriptors as desc
from nitime import utils as tsu
from nitime import algorithms as tsa
-# To suppport older versions of numpy that don't have tril_indices:
+# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices, triu_indices
from .base import BaseAnalyzer
@@ -119,7 +119,7 @@ class CoherenceAnalyzer(BaseAnalyzer):
def spectrum(self):
"""
The spectra of each of the channels and cross-spectra between
- different channles in the input TimeSeries object
+ different channels in the input TimeSeries object
"""
f, spectrum = tsa.get_spectra(self.input.data, method=self.method)
return spectrum
@@ -288,7 +288,7 @@ class MTCoherenceAnalyzer(BaseAnalyzer):
self.bandwidth = self.NW * (2 * Fs) / N
self.alpha = alpha
- self._L = self.input.data.shape[-1] / 2 + 1
+ self._L = self.input.data.shape[-1] // 2 + 1
self._adaptive = adaptive
@desc.setattr_on_read
@@ -304,7 +304,7 @@ class MTCoherenceAnalyzer(BaseAnalyzer):
@desc.setattr_on_read
def df(self):
# The degrees of freedom:
- return 2 * self.NW - 1
+ return int(2 * self.NW - 1)
@desc.setattr_on_read
def spectra(self):
@@ -328,9 +328,9 @@ class MTCoherenceAnalyzer(BaseAnalyzer):
else:
wshape = [1] * len(self.spectra.shape)
wshape[0] = channel_n
- wshape[-2] = int(self.df)
+ wshape[-2] = self.df
pre_w = np.sqrt(self.eigs) + np.zeros((wshape[0],
- self.eigs.shape[0]))
+ self.eigs.shape[0]))
w = pre_w.reshape(*wshape)
@@ -351,7 +351,7 @@ class MTCoherenceAnalyzer(BaseAnalyzer):
self.weights[i],
sides='onesided')
syy = tsa.mtm_cross_spectrum(self.spectra[j], self.spectra[j],
- self.weights[i],
+ self.weights[j],
sides='onesided')
psd_mat[0, i, j] = sxx
psd_mat[1, i, j] = syy
@@ -653,10 +653,10 @@ class SeedCoherenceAnalyzer(object):
if len(self.seed.shape) > 1:
Cxy = np.empty((self.seed.data.shape[0],
self.target.data.shape[0],
- self.frequencies.shape[0]), dtype=np.complex)
+ self.frequencies.shape[0]), dtype=complex)
else:
Cxy = np.empty((self.target.data.shape[0],
- self.frequencies.shape[0]), dtype=np.complex)
+ self.frequencies.shape[0]), dtype=complex)
#Get the fft window cache for the target time-series:
cache = self.target_cache
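Two themes recur in this file (and again in correlation.py and snr.py below): np.float/np.complex were deprecated aliases of the Python builtins and were removed in NumPy 1.24, and shape arithmetic must stay integral under Python 3's true division. A short sketch of both fixes, with made-up dimensions:

import numpy as np

n_seeds, n_targets, n_samples = 3, 5, 101

# Python 3: '/' always returns a float, which is not a valid array dimension.
n_freqs = n_samples // 2 + 1          # 51, an int

# The builtin 'complex' replaces the removed 'np.complex' alias.
Cxy = np.empty((n_seeds, n_targets, n_freqs), dtype=complex)
print(Cxy.shape)                      # (3, 5, 51)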
diff --git a/nitime/analysis/correlation.py b/nitime/analysis/correlation.py
index a0763b8..27359c4 100644
--- a/nitime/analysis/correlation.py
+++ b/nitime/analysis/correlation.py
@@ -4,7 +4,7 @@ from nitime import descriptors as desc
from nitime import timeseries as ts
from nitime import algorithms as tsa
-# To suppport older versions of numpy that don't have tril_indices:
+# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices
from .base import BaseAnalyzer
@@ -147,7 +147,7 @@ class SeedCorrelationAnalyzer(object):
# Preallocate results
Cxy = np.empty((self.seed.data.shape[0],
- self.target.data.shape[0]), dtype=np.float)
+ self.target.data.shape[0]), dtype=float)
for seed_idx, this_seed in enumerate(self.seed.data):
diff --git a/nitime/analysis/event_related.py b/nitime/analysis/event_related.py
index ad6a324..88708f7 100644
--- a/nitime/analysis/event_related.py
+++ b/nitime/analysis/event_related.py
@@ -25,12 +25,12 @@ class EventRelatedAnalyzer(desc.ResetMixin):
A time-series with data on which the event-related analysis proceeds
events_time_series : a TimeSeries object or an Events object
- The events which occured in tandem with the time-series in the
+ The events which occurred in tandem with the time-series in the
EventRelatedAnalyzer. This object's data has to have the same
dimensions as the data in the EventRelatedAnalyzer object. In each
sample in the time-series, there is an integer, which denotes the
- kind of event which occured at that time. In time-bins in which no
- event occured, a 0 should be entered. The data in this time series
+ kind of event which occurred at that time. In time-bins in which no
+ event occurred, a 0 should be entered. The data in this time series
object needs to have the same dimensionality as the data in the
data time-series
@@ -46,7 +46,7 @@ class EventRelatedAnalyzer(desc.ResetMixin):
point in the event-triggered average (where possible)
offset : the offset of the beginning of the event-related time-series,
- relative to the event occurence
+ relative to the event occurrence
"""
#XXX Change so that the offset and length of the eta can be given in
#units of time
@@ -54,7 +54,7 @@ class EventRelatedAnalyzer(desc.ResetMixin):
#Make sure that the offset and the len_et values can be used, by
#padding with zeros before and after:
- if isinstance(events, ts.TimeSeries):
+ if isinstance(events, ts.TimeSeries):
#Set a flag to indicate the input is a time-series object:
self._is_ts = True
s = time_series.data.shape
@@ -67,8 +67,8 @@ class EventRelatedAnalyzer(desc.ResetMixin):
if len(events.shape) == 1 and len(s) > 1:
e_data = e_data + np.zeros((s[0], 1))
- zeros_before = np.zeros((s[:-1] + (abs(offset),)))
- zeros_after = np.zeros((s[:-1] + (abs(len_et),)))
+ zeros_before = np.zeros((s[:-1] + (int(offset),)))
+ zeros_after = np.zeros((s[:-1] + (int(len_et),)))
time_series_data = np.hstack([zeros_before,
time_series.data,
zeros_after])
@@ -195,17 +195,19 @@ class EventRelatedAnalyzer(desc.ResetMixin):
u = np.unique(self.events[i])
event_types = u[np.unique(self.events[i]) != 0]
h[i] = np.empty((event_types.shape[0],
- self.len_et / 2),
+ self.len_et // 2),
dtype=complex)
for e_idx in range(event_types.shape[0]):
this_e = (self.events[i] == event_types[e_idx]) * 1.0
if self._zscore:
- this_h = tsa.freq_domain_xcorr_zscored(data,
+ this_h = tsa.freq_domain_xcorr_zscored(
+ data,
this_e,
-self.offset + 1,
self.len_et - self.offset - 2)
else:
- this_h = tsa.freq_domain_xcorr(data,
+ this_h = tsa.freq_domain_xcorr(
+ data,
this_e,
-self.offset + 1,
self.len_et - self.offset - 2)
@@ -224,20 +226,20 @@ class EventRelatedAnalyzer(desc.ResetMixin):
@desc.setattr_on_read
def et_data(self):
- """The event-triggered data (all occurences).
+ """The event-triggered data (all occurrences).
This gets the time-series corresponding to the inidividual event
- occurences. Returns a list of lists of time-series. The first dimension
+ occurrences. Returns a list of lists of time-series. The first dimension
is the different channels in the original time-series data and the
second dimension is each type of event in the event time series
The time-series itself has the first diemnsion of the data being the
- specific occurence, with time 0 locked to the that occurence
+ specific occurrence, with time 0 locked to the that occurrence
of the event and the last dimension is time.e
This complicated structure is so that it can deal with situations where
each channel has different events and different events have different #
- of occurences
+ of occurrences
"""
#Make a list for the output
h = [0] * self._len_h
diff --git a/nitime/analysis/granger.py b/nitime/analysis/granger.py
index 6e1efb2..70763a2 100644
--- a/nitime/analysis/granger.py
+++ b/nitime/analysis/granger.py
@@ -11,7 +11,7 @@ from nitime import descriptors as desc
from .base import BaseAnalyzer
-# To suppport older versions of numpy that don't have tril_indices:
+# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices_from
def fit_model(x1, x2, order=None, max_order=10,
@@ -196,7 +196,7 @@ class GrangerAnalyzer(BaseAnalyzer):
# 'Translate' from dict form into matrix form:
for i, j in self.ij:
- arr[j, i, :] = self._granger_causality[key][i, j]
+ arr[i, j, :] = self._granger_causality[key][i, j]
return arr
@desc.setattr_on_read
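The one-line indexing change above alters how the per-pair Granger results are packed into the output array: the value computed for the ordered pair (i, j) is now stored at arr[i, j, :] rather than at the transposed position (the test changes further down in this diff reflect the same convention). A toy sketch of the packing step, with hypothetical values standing in for the real causality estimates:

import numpy as np

ij = [(0, 1), (1, 0)]
n_freqs = 4
# Hypothetical per-pair results keyed by ordered (i, j) pairs:
gc = {(0, 1): np.full(n_freqs, 0.1),
      (1, 0): np.full(n_freqs, 0.7)}

arr = np.full((2, 2, n_freqs), np.nan)
for i, j in ij:
    arr[i, j, :] = gc[(i, j)]         # keep the ordering of the pair

assert np.allclose(arr[0, 1], 0.1) and np.allclose(arr[1, 0], 0.7)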
diff --git a/nitime/analysis/snr.py b/nitime/analysis/snr.py
index a05183f..7ca3a6a 100644
--- a/nitime/analysis/snr.py
+++ b/nitime/analysis/snr.py
@@ -90,7 +90,7 @@ class SNRAnalyzer(BaseAnalyzer):
@desc.setattr_on_read
def mt_frequencies(self):
return np.linspace(0, self.input.sampling_rate / 2,
- self.input.data.shape[-1] / 2 + 1)
+ self.input.data.shape[-1] // 2 + 1)
@desc.setattr_on_read
def mt_signal_psd(self):
@@ -104,7 +104,7 @@ class SNRAnalyzer(BaseAnalyzer):
@desc.setattr_on_read
def mt_noise_psd(self):
p = np.empty((self.noise.data.shape[0],
- self.noise.data.shape[-1] / 2 + 1))
+ self.noise.data.shape[-1] // 2 + 1))
for i in range(p.shape[0]):
_, p[i], _ = tsa.multi_taper_psd(self.noise.data[i],
diff --git a/nitime/analysis/spectral.py b/nitime/analysis/spectral.py
index 02b5410..29721d6 100644
--- a/nitime/analysis/spectral.py
+++ b/nitime/analysis/spectral.py
@@ -1,4 +1,3 @@
-
import numpy as np
from nitime.lazy import scipy
from nitime.lazy import scipy_signal as signal
@@ -92,9 +91,9 @@ class SpectralAnalyzer(BaseAnalyzer):
psd_len = NFFT
dt = complex
else:
- psd_len = NFFT / 2.0 + 1
+ psd_len = NFFT // 2 + 1
dt = float
-
+
#If multi-channel data:
if len(self.input.data.shape) > 1:
psd_shape = (self.input.shape[:-1] + (psd_len,))
@@ -185,7 +184,7 @@ class SpectralAnalyzer(BaseAnalyzer):
else:
f = tsu.get_freqs(sampling_rate, data.shape[-1])
spectrum_fourier = fft(data)[..., :f.shape[0]]
-
+
return f, spectrum_fourier
@desc.setattr_on_read
@@ -197,15 +196,15 @@ class SpectralAnalyzer(BaseAnalyzer):
"""
if np.iscomplexobj(self.input.data):
- psd_len = self.input.shape[-1]
+ psd_len = self.input.shape[-1]
dt = complex
else:
- psd_len = self.input.shape[-1] / 2 + 1
+ psd_len = self.input.shape[-1] // 2 + 1
dt = float
#Initialize the output
spectrum_multi_taper = np.empty((self.input.shape[:-1] + (psd_len,)),
- dtype=dt)
+ dtype=dt)
#If multi-channel data:
if len(self.input.data.shape) > 1:
@@ -277,11 +276,12 @@ class FilterAnalyzer(desc.ResetMixin):
#Initialize all the local variables you will need for all the different
#filtering methods:
- self.data = time_series.data
- self.sampling_rate = time_series.sampling_rate
+ self._ts = time_series
+ self.data = self._ts.data
+ self.sampling_rate = self._ts.sampling_rate
self.ub = ub
self.lb = lb
- self.time_unit = time_series.time_unit
+ self.time_unit = self._ts.time_unit
self._boxcar_iterations = boxcar_iterations
self._gstop = gstop
self._gpass = gpass
@@ -313,30 +313,35 @@ class FilterAnalyzer(desc.ResetMixin):
if in_ts is not None:
data = in_ts.data
Fs = in_ts.sampling_rate
+ t0 = in_ts.t0
+ time_unit = in_ts.time_unit
else:
- data = self.data
- Fs = self.sampling_rate
+ data = self._ts.data
+ Fs = self._ts.sampling_rate
+ t0 = self._ts.t0
+ time_unit = self._ts.time_unit
- #filtfilt only operates channel-by-channel, so we need to loop over the
- #channels, if the data is multi-channel data:
+ # filtfilt only operates channel-by-channel, so we need to loop over
+ # the channels, if the data is multi-channel data:
if len(data.shape) > 1:
out_data = np.empty(data.shape, dtype=data.dtype)
for i in range(data.shape[0]):
out_data[i] = signal.filtfilt(b, a, data[i])
- #Make sure to preserve the DC:
+ # Make sure to preserve the DC:
dc = np.mean(data[i])
- out_data[i] -= np.mean(out_data[i])
- out_data[i] += dc
+ out_data[i] = out_data[i] - np.mean(out_data[i])
+ out_data[i] = out_data[i] + dc
else:
out_data = signal.filtfilt(b, a, data)
- #Make sure to preserve the DC:
+ # Make sure to preserve the DC:
dc = np.mean(data)
out_data -= np.mean(out_data)
out_data += dc
return ts.TimeSeries(out_data,
sampling_rate=Fs,
- time_unit=self.time_unit)
+ time_unit=time_unit,
+ t0=t0)
@desc.setattr_on_read
def fir(self):
@@ -364,8 +369,8 @@ class FilterAnalyzer(desc.ResetMixin):
n_taps = self._filt_order + 1
- #This means the filter order you chose was too large (needs to be
- #shorter than a 1/3 of your time-series )
+ # This means the filter order you chose was too large (needs to be
+ # shorter than a 1/3 of your time-series )
if n_taps > self.data.shape[-1] * 3:
e_s = "The filter order chosen is too large for this time-series"
raise ValueError(e_s)
@@ -373,18 +378,20 @@ class FilterAnalyzer(desc.ResetMixin):
# a is always 1:
a = [1]
- sig = ts.TimeSeries(data=self.data, sampling_rate=self.sampling_rate)
+ sig = ts.TimeSeries(data=self._ts.data,
+ sampling_rate=self._ts.sampling_rate,
+ t0=self._ts.t0)
- #Lowpass:
+ # Lowpass:
if ub_frac < 1:
b = signal.firwin(n_taps, ub_frac, window=self._win)
sig = self.filtfilt(b, a, sig)
- #High-pass
+ # High-pass
if lb_frac > 0:
#Includes a spectral inversion:
b = -1 * signal.firwin(n_taps, lb_frac, window=self._win)
- b[n_taps / 2] = b[n_taps / 2] + 1
+ b[n_taps // 2] = b[n_taps // 2] + 1
sig = self.filtfilt(b, a, sig)
return sig
@@ -411,8 +418,8 @@ class FilterAnalyzer(desc.ResetMixin):
wp = [lb_frac, ub_frac]
- ws = [np.max([lb_frac - 0.1, 0]),
- np.min([ub_frac + 0.1, 1.0])]
+ ws = [np.max([lb_frac - 0.1, 0.001]),
+ np.min([ub_frac + 0.1, 0.999])]
# For the lowpass:
elif lb_frac == 0:
@@ -461,7 +468,8 @@ class FilterAnalyzer(desc.ResetMixin):
return ts.TimeSeries(data=data_out,
sampling_rate=self.sampling_rate,
- time_unit=self.time_unit)
+ time_unit=self.time_unit,
+ t0=self._ts.t0)
@desc.setattr_on_read
def filtered_boxcar(self):
@@ -486,8 +494,9 @@ class FilterAnalyzer(desc.ResetMixin):
n_iterations=self._boxcar_iterations)
return ts.TimeSeries(data=data_out,
- sampling_rate=self.sampling_rate,
- time_unit=self.time_unit)
+ sampling_rate=self.sampling_rate,
+ time_unit=self.time_unit,
+ t0=self._ts.t0)
class HilbertAnalyzer(BaseAnalyzer):
@@ -510,12 +519,7 @@ class HilbertAnalyzer(BaseAnalyzer):
"""The natural output for this analyzer is the analytic signal """
data = self.input.data
sampling_rate = self.input.sampling_rate
- #If you have scipy with the fixed scipy.signal.hilbert (r6205 and
- #later)
- if scipy.__version__ >= '0.9':
- hilbert = signal.hilbert
- else:
- hilbert = tsu.hilbert_from_new_scipy
+ hilbert = signal.hilbert
return ts.TimeSeries(data=hilbert(data),
sampling_rate=sampling_rate)
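The FilterAnalyzer changes do two things: they keep a reference to the input TimeSeries so that t0 and time_unit survive into every filtered output, and they re-anchor the filtered trace at the input's DC level (subtract the output mean, add back the input mean), so the original offset is preserved regardless of the filter's response at 0 Hz. A minimal sketch of that DC-preserving step using scipy directly, not the analyzer:

import numpy as np
from scipy import signal

x = 5.0 + np.random.randn(1024)          # data with a non-zero DC level

# A high-pass FIR would normally wipe out the DC component entirely:
b = signal.firwin(65, 0.1, pass_zero=False)
a = [1]

y = signal.filtfilt(b, a, x)
# Restore the original DC level, as the analyzer does:
y = y - np.mean(y) + np.mean(x)

print(np.mean(x), np.mean(y))            # identical (up to rounding) after the correction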
diff --git a/nitime/analysis/tests/test_coherence.py b/nitime/analysis/tests/test_coherence.py
index ff528c8..e490b13 100644
--- a/nitime/analysis/tests/test_coherence.py
+++ b/nitime/analysis/tests/test_coherence.py
@@ -4,13 +4,14 @@ import numpy as np
import numpy.testing as npt
import matplotlib
import matplotlib.mlab as mlab
+import pytest
import nitime.timeseries as ts
import nitime.analysis as nta
import platform
-# Some tests might require python version 2.5 or above:
+# Some tests might require python version 2.5 or above:
if float(platform.python_version()[:3]) < 2.5:
old_python = True
else:
@@ -19,7 +20,7 @@ else:
# Matplotlib older than 0.99 will have some issues with the normalization of t
if float(matplotlib.__version__[:3]) < 0.99:
- w_s = "You have a relatively old version of Matplotlib. "
+ w_s = "You have a relatively old version of Matplotlib. "
w_s += " Estimation of the PSD DC component might not be as expected"
w_s += " Consider updating Matplotlib: http://matplotlib.sourceforge.net/"
warnings.warn(w_s, Warning)
@@ -76,7 +77,7 @@ def test_CoherenceAnalyzer():
npt.assert_equal(len(C.coherence_partial.shape), 4)
-@npt.dec.skipif(old_mpl)
+@pytest.mark.skipif(old_mpl, reason="Old MPL")
def test_SparseCoherenceAnalyzer():
Fs = np.pi
t = np.arange(256)
@@ -110,8 +111,8 @@ def test_SparseCoherenceAnalyzer():
npt.assert_almost_equal(C2.delay[0, 1], C1.delay[0, 1])
# Make sure that you would get an error if you provided a method other than
# 'welch':
- npt.assert_raises(ValueError, nta.SparseCoherenceAnalyzer, T,
- method=dict(this_method='foo'))
+ with pytest.raises(ValueError) as e_info:
+ nta.SparseCoherenceAnalyzer(T, method=dict(this_method='foo'))
def test_MTCoherenceAnalyzer():
@@ -132,7 +133,7 @@ def test_MTCoherenceAnalyzer():
NFFT))
-@npt.dec.skipif(old_python)
+@pytest.mark.skipif(old_python, reason="Old Python")
def test_warn_short_tseries():
"""
@@ -186,8 +187,8 @@ def test_SeedCoherenceAnalyzer():
npt.assert_almost_equal(C1.delay[0, 1], C2.delay[1])
else:
- npt.assert_raises(ValueError, nta.SeedCoherenceAnalyzer, T_seed1,
- T_target, this_method)
+ with pytest.raises(ValueError) as e_info:
+ nta.SeedCoherenceAnalyzer(T_seed1, T_target, this_method)
def test_SeedCoherenceAnalyzer_same_Fs():
@@ -206,5 +207,5 @@ def test_SeedCoherenceAnalyzer_same_Fs():
T2 = ts.TimeSeries(np.random.rand(t.shape[-1]),
sampling_rate=Fs2)
-
- npt.assert_raises(ValueError, nta.SeedCoherenceAnalyzer, T1, T2)
+ with pytest.raises(ValueError) as e_info:
+ nta.SeedCoherenceAnalyzer(T1, T2)
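The test changes in this file follow the pattern used throughout the rest of the diff: numpy's nose-based helpers (npt.dec.skipif, npt.assert_raises) give way to pytest markers and context managers. The two idioms, reduced to a sketch:

import pytest

old_mpl = False   # stand-in for the real version check

@pytest.mark.skipif(old_mpl, reason="Old MPL")
def test_something():
    pass

def test_bad_method_raises():
    # Replaces npt.assert_raises(ValueError, callable, *args):
    with pytest.raises(ValueError):
        raise ValueError("method must be 'welch'")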
diff --git a/nitime/analysis/tests/test_granger.py b/nitime/analysis/tests/test_granger.py
index aba0e88..79ac55e 100644
--- a/nitime/analysis/tests/test_granger.py
+++ b/nitime/analysis/tests/test_granger.py
@@ -109,5 +109,10 @@ def test_GrangerAnalyzer():
# Test inputting ij:
g2 = gc.GrangerAnalyzer(ts1, ij=[(0, 1), (1, 0)])
+ # g1 agrees with g2
+ npt.assert_almost_equal(g1.causality_xy[0, 1], g2.causality_xy[0, 1])
+ npt.assert_almost_equal(g1.causality_yx[0, 1], g2.causality_yx[0, 1])
+
# x => y for one is like y => x for the other:
- npt.assert_almost_equal(g1.causality_yx[1, 0], g2.causality_xy[0, 1])
+ npt.assert_almost_equal(g2.causality_yx[1, 0], g2.causality_xy[0, 1])
+ npt.assert_almost_equal(g2.causality_xy[1, 0], g2.causality_yx[0, 1])
diff --git a/nitime/data/fmri_timeseries.csv b/nitime/data/fmri_timeseries.csv
index 9aaa877..9aaa877 100755..100644
--- a/nitime/data/fmri_timeseries.csv
+++ b/nitime/data/fmri_timeseries.csv
diff --git a/nitime/data/grasshopper_spike_times1.txt b/nitime/data/grasshopper_spike_times1.txt
index cae26c8..cae26c8 100755..100644
--- a/nitime/data/grasshopper_spike_times1.txt
+++ b/nitime/data/grasshopper_spike_times1.txt
diff --git a/nitime/data/grasshopper_spike_times2.txt b/nitime/data/grasshopper_spike_times2.txt
index af08af0..af08af0 100755..100644
--- a/nitime/data/grasshopper_spike_times2.txt
+++ b/nitime/data/grasshopper_spike_times2.txt
diff --git a/nitime/data/grasshopper_stimulus1.txt b/nitime/data/grasshopper_stimulus1.txt
index 9cf4768..9cf4768 100755..100644
--- a/nitime/data/grasshopper_stimulus1.txt
+++ b/nitime/data/grasshopper_stimulus1.txt
diff --git a/nitime/data/grasshopper_stimulus2.txt b/nitime/data/grasshopper_stimulus2.txt
index b9fdd94..b9fdd94 100755..100644
--- a/nitime/data/grasshopper_stimulus2.txt
+++ b/nitime/data/grasshopper_stimulus2.txt
diff --git a/nitime/fmri/hrf.py b/nitime/fmri/hrf.py
index 61123da..82f9269 100644
--- a/nitime/fmri/hrf.py
+++ b/nitime/fmri/hrf.py
@@ -1,6 +1,6 @@
from __future__ import print_function
import numpy as np
-from scipy.misc import factorial
+from scipy.special import factorial
def gamma_hrf(duration, A=1., tau=1.08, n=3, delta=2.05, Fs=1.0):
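factorial was removed from scipy.misc in later SciPy releases; scipy.special.factorial is the maintained location, so the HRF code only needs the import swap shown above. A quick sanity check of the replacement:

from scipy.special import factorial

print(factorial(3))              # 6.0
print(factorial([0, 1, 2, 3]))   # array([1., 1., 2., 6.]) -- vectorized over arrays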
diff --git a/nitime/fmri/io.py b/nitime/fmri/io.py
index 55d4e6f..e54f86b 100644
--- a/nitime/fmri/io.py
+++ b/nitime/fmri/io.py
@@ -88,7 +88,7 @@ def time_series_from_file(nifti_files, coords=None, TR=None, normalize=None,
if verbose:
print("Reading %s" % nifti_files)
im = load(nifti_files)
- data = im.get_data()
+ data = im.get_fdata()
# If coordinates are provided as input, read data only from these coordinates:
if coords is not None:
#If the input is the coords of several ROIs
@@ -118,7 +118,7 @@ def time_series_from_file(nifti_files, coords=None, TR=None, normalize=None,
if verbose:
print("Reading %s" % f)
im = load(f)
- data = im.get_data()
+ data = im.get_fdata()
if coords is not None:
#If the input is the coords of several ROIs
if isinstance(coords, tuple) or isinstance(coords, list):
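get_data() was deprecated in nibabel in favor of get_fdata(), which always returns floating-point data with a predictable dtype, and it was later removed. A hedged sketch of the reading pattern used here ('example.nii.gz' is a placeholder path):

import nibabel as nib

img = nib.load('example.nii.gz')     # placeholder file name
data = img.get_fdata()               # float64 array by default
print(data.shape, data.dtype)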
diff --git a/nitime/fmri/tests/test_io.py b/nitime/fmri/tests/test_io.py
index 973e599..8a5d78e 100644
--- a/nitime/fmri/tests/test_io.py
+++ b/nitime/fmri/tests/test_io.py
@@ -7,6 +7,7 @@ import os
import numpy as np
import numpy.testing as npt
+import pytest
import nitime
import nitime.timeseries as ts
@@ -22,7 +23,7 @@ except ImportError as e:
data_path = os.path.join(nitime.__path__[0],'data')
-@npt.dec.skipif(no_nibabel,no_nibabel_msg)
+@pytest.mark.skipif(no_nibabel, reason=no_nibabel_msg)
def test_time_series_from_file():
"""Testing reading of data from nifti files, using nibabel"""
@@ -64,7 +65,7 @@ def test_time_series_from_file():
npt.assert_equal(t4.sampling_interval,nitime.TimeArray(1.35))
# Test the default behavior:
- data = io.load(fmri_file1).get_data()
+ data = io.load(fmri_file1).get_fdata()
t5 = ts_ff(fmri_file1)
npt.assert_equal(t5.shape, data.shape)
npt.assert_equal(t5.sampling_interval, ts.TimeArray(1, time_unit='s'))
diff --git a/nitime/six.py b/nitime/six.py
deleted file mode 100644
index 176196c..0000000
--- a/nitime/six.py
+++ /dev/null
@@ -1,585 +0,0 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2013 Benjamin Peterson
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.4.1"
-
-
-# Useful for very coarse version differentiation.
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- string_types = str,
- integer_types = int,
- class_types = type,
- text_type = str
- binary_type = bytes
-
- MAXSIZE = sys.maxsize
-else:
- string_types = basestring,
- integer_types = (int, long)
- class_types = (type, types.ClassType)
- text_type = unicode
- binary_type = str
-
- if sys.platform.startswith("java"):
- # Jython always uses 32 bits.
- MAXSIZE = int((1 << 31) - 1)
- else:
- # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
- class X(object):
- def __len__(self):
- return 1 << 31
- try:
- len(X())
- except OverflowError:
- # 32-bit
- MAXSIZE = int((1 << 31) - 1)
- else:
- # 64-bit
- MAXSIZE = int((1 << 63) - 1)
- del X
-
-
-def _add_doc(func, doc):
- """Add documentation to a function."""
- func.__doc__ = doc
-
-
-def _import_module(name):
- """Import module, returning the module after the last dot."""
- __import__(name)
- return sys.modules[name]
-
-
-class _LazyDescr(object):
-
- def __init__(self, name):
- self.name = name
-
- def __get__(self, obj, tp):
- result = self._resolve()
- setattr(obj, self.name, result)
- # This is a bit ugly, but it avoids running this again.
- delattr(tp, self.name)
- return result
-
-
-class MovedModule(_LazyDescr):
-
- def __init__(self, name, old, new=None):
- super(MovedModule, self).__init__(name)
- if PY3:
- if new is None:
- new = name
- self.mod = new
- else:
- self.mod = old
-
- def _resolve(self):
- return _import_module(self.mod)
-
-
-class MovedAttribute(_LazyDescr):
-
- def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
- super(MovedAttribute, self).__init__(name)
- if PY3:
- if new_mod is None:
- new_mod = name
- self.mod = new_mod
- if new_attr is None:
- if old_attr is None:
- new_attr = name
- else:
- new_attr = old_attr
- self.attr = new_attr
- else:
- self.mod = old_mod
- if old_attr is None:
- old_attr = name
- self.attr = old_attr
-
- def _resolve(self):
- module = _import_module(self.mod)
- return getattr(module, self.attr)
-
-
-
-class _MovedItems(types.ModuleType):
- """Lazy loading of moved objects"""
-
-
-_moved_attributes = [
- MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
- MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
- MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
- MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
- MovedAttribute("map", "itertools", "builtins", "imap", "map"),
- MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
- MovedAttribute("reduce", "__builtin__", "functools"),
- MovedAttribute("StringIO", "StringIO", "io"),
- MovedAttribute("UserString", "UserString", "collections"),
- MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
- MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
-
- MovedModule("builtins", "__builtin__"),
- MovedModule("configparser", "ConfigParser"),
- MovedModule("copyreg", "copy_reg"),
- MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
- MovedModule("http_cookies", "Cookie", "http.cookies"),
- MovedModule("html_entities", "htmlentitydefs", "html.entities"),
- MovedModule("html_parser", "HTMLParser", "html.parser"),
- MovedModule("http_client", "httplib", "http.client"),
- MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
- MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
- MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
- MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
- MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
- MovedModule("cPickle", "cPickle", "pickle"),
- MovedModule("queue", "Queue"),
- MovedModule("reprlib", "repr"),
- MovedModule("socketserver", "SocketServer"),
- MovedModule("_thread", "thread", "_thread"),
- MovedModule("tkinter", "Tkinter"),
- MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
- MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
- MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
- MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
- MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
- MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
- MovedModule("tkinter_colorchooser", "tkColorChooser",
- "tkinter.colorchooser"),
- MovedModule("tkinter_commondialog", "tkCommonDialog",
- "tkinter.commondialog"),
- MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_font", "tkFont", "tkinter.font"),
- MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
- MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
- "tkinter.simpledialog"),
- MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
- MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
- MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
- MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
- MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
- MovedModule("winreg", "_winreg"),
-]
-for attr in _moved_attributes:
- setattr(_MovedItems, attr.name, attr)
-del attr
-
-moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
-
-
-
-class Module_six_moves_urllib_parse(types.ModuleType):
- """Lazy loading of moved objects in six.moves.urllib_parse"""
-
-
-_urllib_parse_moved_attributes = [
- MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
- MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
- MovedAttribute("urljoin", "urlparse", "urllib.parse"),
- MovedAttribute("urlparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
- MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
- MovedAttribute("quote", "urllib", "urllib.parse"),
- MovedAttribute("quote_plus", "urllib", "urllib.parse"),
- MovedAttribute("unquote", "urllib", "urllib.parse"),
- MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
- MovedAttribute("urlencode", "urllib", "urllib.parse"),
-]
-for attr in _urllib_parse_moved_attributes:
- setattr(Module_six_moves_urllib_parse, attr.name, attr)
-del attr
-
-sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
-sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
-
-
-class Module_six_moves_urllib_error(types.ModuleType):
- """Lazy loading of moved objects in six.moves.urllib_error"""
-
-
-_urllib_error_moved_attributes = [
- MovedAttribute("URLError", "urllib2", "urllib.error"),
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
- MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
-]
-for attr in _urllib_error_moved_attributes:
- setattr(Module_six_moves_urllib_error, attr.name, attr)
-del attr
-
-sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
-sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
-
-
-class Module_six_moves_urllib_request(types.ModuleType):
- """Lazy loading of moved objects in six.moves.urllib_request"""
-
-
-_urllib_request_moved_attributes = [
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
- MovedAttribute("pathname2url", "urllib", "urllib.request"),
- MovedAttribute("url2pathname", "urllib", "urllib.request"),
- MovedAttribute("getproxies", "urllib", "urllib.request"),
- MovedAttribute("Request", "urllib2", "urllib.request"),
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
- MovedAttribute("urlretrieve", "urllib", "urllib.request"),
- MovedAttribute("urlcleanup", "urllib", "urllib.request"),
- MovedAttribute("URLopener", "urllib", "urllib.request"),
- MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
-]
-for attr in _urllib_request_moved_attributes:
- setattr(Module_six_moves_urllib_request, attr.name, attr)
-del attr
-
-sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
-sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
-
-
-class Module_six_moves_urllib_response(types.ModuleType):
- """Lazy loading of moved objects in six.moves.urllib_response"""
-
-
-_urllib_response_moved_attributes = [
- MovedAttribute("addbase", "urllib", "urllib.response"),
- MovedAttribute("addclosehook", "urllib", "urllib.response"),
- MovedAttribute("addinfo", "urllib", "urllib.response"),
- MovedAttribute("addinfourl", "urllib", "urllib.response"),
-]
-for attr in _urllib_response_moved_attributes:
- setattr(Module_six_moves_urllib_response, attr.name, attr)
-del attr
-
-sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
-sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
-
-
-class Module_six_moves_urllib_robotparser(types.ModuleType):
- """Lazy loading of moved objects in six.moves.urllib_robotparser"""
-
-
-_urllib_robotparser_moved_attributes = [
- MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
-]
-for attr in _urllib_robotparser_moved_attributes:
- setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
-del attr
-
-sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser")
-sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
-
-
-class Module_six_moves_urllib(types.ModuleType):
- """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
- parse = sys.modules[__name__ + ".moves.urllib_parse"]
- error = sys.modules[__name__ + ".moves.urllib_error"]
- request = sys.modules[__name__ + ".moves.urllib_request"]
- response = sys.modules[__name__ + ".moves.urllib_response"]
- robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
-
-
-sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
-
-
-def add_move(move):
- """Add an item to six.moves."""
- setattr(_MovedItems, move.name, move)
-
-
-def remove_move(name):
- """Remove item from six.moves."""
- try:
- delattr(_MovedItems, name)
- except AttributeError:
- try:
- del moves.__dict__[name]
- except KeyError:
- raise AttributeError("no such move, %r" % (name,))
-
-
-if PY3:
- _meth_func = "__func__"
- _meth_self = "__self__"
-
- _func_closure = "__closure__"
- _func_code = "__code__"
- _func_defaults = "__defaults__"
- _func_globals = "__globals__"
-
- _iterkeys = "keys"
- _itervalues = "values"
- _iteritems = "items"
- _iterlists = "lists"
-else:
- _meth_func = "im_func"
- _meth_self = "im_self"
-
- _func_closure = "func_closure"
- _func_code = "func_code"
- _func_defaults = "func_defaults"
- _func_globals = "func_globals"
-
- _iterkeys = "iterkeys"
- _itervalues = "itervalues"
- _iteritems = "iteritems"
- _iterlists = "iterlists"
-
-
-try:
- advance_iterator = next
-except NameError:
- def advance_iterator(it):
- return it.next()
-next = advance_iterator
-
-
-try:
- callable = callable
-except NameError:
- def callable(obj):
- return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-
-
-if PY3:
- def get_unbound_function(unbound):
- return unbound
-
- create_bound_method = types.MethodType
-
- Iterator = object
-else:
- def get_unbound_function(unbound):
- return unbound.im_func
-
- def create_bound_method(func, obj):
- return types.MethodType(func, obj, obj.__class__)
-
- class Iterator(object):
-
- def next(self):
- return type(self).__next__(self)
-
- callable = callable
-_add_doc(get_unbound_function,
- """Get the function out of a possibly unbound function""")
-
-
-get_method_function = operator.attrgetter(_meth_func)
-get_method_self = operator.attrgetter(_meth_self)
-get_function_closure = operator.attrgetter(_func_closure)
-get_function_code = operator.attrgetter(_func_code)
-get_function_defaults = operator.attrgetter(_func_defaults)
-get_function_globals = operator.attrgetter(_func_globals)
-
-
-def iterkeys(d, **kw):
- """Return an iterator over the keys of a dictionary."""
- return iter(getattr(d, _iterkeys)(**kw))
-
-def itervalues(d, **kw):
- """Return an iterator over the values of a dictionary."""
- return iter(getattr(d, _itervalues)(**kw))
-
-def iteritems(d, **kw):
- """Return an iterator over the (key, value) pairs of a dictionary."""
- return iter(getattr(d, _iteritems)(**kw))
-
-def iterlists(d, **kw):
- """Return an iterator over the (key, [values]) pairs of a dictionary."""
- return iter(getattr(d, _iterlists)(**kw))
-
-
-if PY3:
- def b(s):
- return s.encode("latin-1")
- def u(s):
- return s
- unichr = chr
- if sys.version_info[1] <= 1:
- def int2byte(i):
- return bytes((i,))
- else:
- # This is about 2x faster than the implementation above on 3.2+
- int2byte = operator.methodcaller("to_bytes", 1, "big")
- byte2int = operator.itemgetter(0)
- indexbytes = operator.getitem
- iterbytes = iter
- import io
- StringIO = io.StringIO
- BytesIO = io.BytesIO
-else:
- def b(s):
- return s
- def u(s):
- return unicode(s, "unicode_escape")
- unichr = unichr
- int2byte = chr
- def byte2int(bs):
- return ord(bs[0])
- def indexbytes(buf, i):
- return ord(buf[i])
- def iterbytes(buf):
- return (ord(byte) for byte in buf)
- import StringIO
- StringIO = BytesIO = StringIO.StringIO
-_add_doc(b, """Byte literal""")
-_add_doc(u, """Text literal""")
-
-
-if PY3:
- exec_ = getattr(moves.builtins, "exec")
-
-
- def reraise(tp, value, tb=None):
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
-else:
- def exec_(_code_, _globs_=None, _locs_=None):
- """Execute code in a namespace."""
- if _globs_ is None:
- frame = sys._getframe(1)
- _globs_ = frame.f_globals
- if _locs_ is None:
- _locs_ = frame.f_locals
- del frame
- elif _locs_ is None:
- _locs_ = _globs_
- exec("""exec _code_ in _globs_, _locs_""")
-
-
- exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
-""")
-
-
-print_ = getattr(moves.builtins, "print", None)
-if print_ is None:
- def print_(*args, **kwargs):
- """The new-style print function for Python 2.4 and 2.5."""
- fp = kwargs.pop("file", sys.stdout)
- if fp is None:
- return
- def write(data):
- if not isinstance(data, basestring):
- data = str(data)
- # If the file has an encoding, encode unicode with it.
- if (isinstance(fp, file) and
- isinstance(data, unicode) and
- fp.encoding is not None):
- errors = getattr(fp, "errors", None)
- if errors is None:
- errors = "strict"
- data = data.encode(fp.encoding, errors)
- fp.write(data)
- want_unicode = False
- sep = kwargs.pop("sep", None)
- if sep is not None:
- if isinstance(sep, unicode):
- want_unicode = True
- elif not isinstance(sep, str):
- raise TypeError("sep must be None or a string")
- end = kwargs.pop("end", None)
- if end is not None:
- if isinstance(end, unicode):
- want_unicode = True
- elif not isinstance(end, str):
- raise TypeError("end must be None or a string")
- if kwargs:
- raise TypeError("invalid keyword arguments to print()")
- if not want_unicode:
- for arg in args:
- if isinstance(arg, unicode):
- want_unicode = True
- break
- if want_unicode:
- newline = unicode("\n")
- space = unicode(" ")
- else:
- newline = "\n"
- space = " "
- if sep is None:
- sep = space
- if end is None:
- end = newline
- for i, arg in enumerate(args):
- if i:
- write(sep)
- write(arg)
- write(end)
-
-_add_doc(reraise, """Reraise an exception.""")
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- return meta("NewBase", bases, {})
-
-def add_metaclass(metaclass):
- """Class decorator for creating a class with a metaclass."""
- def wrapper(cls):
- orig_vars = cls.__dict__.copy()
- orig_vars.pop('__dict__', None)
- orig_vars.pop('__weakref__', None)
- for slots_var in orig_vars.get('__slots__', ()):
- orig_vars.pop(slots_var)
- return metaclass(cls.__name__, cls.__bases__, orig_vars)
- return wrapper
diff --git a/nitime/testlib.py b/nitime/testlib.py
deleted file mode 100644
index e2a1e73..0000000
--- a/nitime/testlib.py
+++ /dev/null
@@ -1,130 +0,0 @@
-"""Utilities to facilitate the writing of tests for nitime.
-"""
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-
-#-----------------------------------------------------------------------------
-# Functions and classes
-#-----------------------------------------------------------------------------
-
-def import_nose():
- """
- Import nose only when needed.
- """
- fine_nose = True
- minimum_nose_version = (0,10,0)
- try:
- import nose
- from nose.tools import raises
- except ImportError:
- fine_nose = False
- else:
- if nose.__versioninfo__ < minimum_nose_version:
- fine_nose = False
-
- if not fine_nose:
- msg = 'Need nose >= %d.%d.%d for tests - see ' \
- 'http://somethingaboutorange.com/mrl/projects/nose' % \
- minimum_nose_version
-
- raise ImportError(msg)
-
- return nose
-
-def fpw_opt_str():
- """
- Return first-package-wins option string for this version of nose
-
- Versions of nose prior to 1.1.0 needed ``=True`` for ``first-package-wins``,
- versions after won't accept it.
-
- changeset: 816:c344a4552d76
- http://code.google.com/p/python-nose/issues/detail?id=293
-
- Returns
- -------
- fpw_str : str
- Either '--first-package-wins' or '--first-package-wins=True' depending
- on the nose version we are running.
- """
- # protect nose import to provide comprehensible error if missing
- nose = import_nose()
- config = nose.config.Config()
- fpw_str = '--first-package-wins'
- opt_parser = config.getParser('')
- opt_def = opt_parser.get_option('--first-package-wins')
- if opt_def is None:
- raise RuntimeError('Nose does not accept "first-package-wins"'
- ' - is this an old nose version?')
- if opt_def.takes_value(): # the =True variant
- fpw_str += '=True'
- return fpw_str
-
-
-def test(nose_arg='nitime', doctests=True, first_package_wins=True, extra_argv=None):
- """
-
- Run the nitime test suite using nose.
-
- Parameters
- ----------
-
- nose_arg: string, optional
- What the first nose argument should be. Defaults to 'nitime', which
- will run all of the tests that can be found for the package, but this
- argument allows you to test a subset of the test suite, such as
- 'nitime.tests.test_timeseries' or even a specific test using
- 'nitime.tests.test_timeseries:test_TimeArray_comparison'.
-
- doctests: bool, optional
- Whether to run the doctests. Defaults to True
-
- first_package_wins: bool, optional
- Don't evict packages from sys.module, if detecting another package with
- the same name in some other location(nosetests default behavior is to do
- that).
-
- extra_argv: string, list or tuple, optional
- Additional argument (string) or arguments (list or tuple of strings) to
- be passed to nose when running the tests.
-
- """
- from numpy.testing import noseclasses
- # We construct our own argv manually, so we must set argv[0] ourselves
- argv = ['nosetests',
- # Name the package to actually test, in this case nitime
- nose_arg,
-
- # extra info in tracebacks
- '--detailed-errors',
-
- # We add --exe because of setuptools' imbecility (it blindly does
- # chmod +x on ALL files). Nose does the right thing and it tries
- # to avoid executables, setuptools unfortunately forces our hand
- # here. This has been discussed on the distutils list and the
- # setuptools devs refuse to fix this problem!
- '--exe',
- ]
-
- # If someone wants to add some other argv
- if extra_argv is not None:
- if isinstance(extra_argv, list) or isinstance(extra_argv, list):
- for this in extra_argv: argv.append(this)
- else:
- argv.append(extra_argv)
-
- if first_package_wins:
- argv.append(fpw_opt_str())
-
- if doctests:
- argv.append('--with-doctest')
- plugins = [noseclasses.KnownFailure()]
- # Now nose can run
- return noseclasses.NumpyTestProgram(argv=argv, exit=False,
- addplugins=plugins).result
-
-# Tell nose that the test() function itself isn't a test, otherwise we get a
-# recursive loop inside nose.
-test.__test__ = False
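With the nose-specific nitime.testlib gone, the suite is collected by pytest instead. If an in-process entry point is still wanted, something along these lines would do roughly the same job as the removed test() helper (a sketch, not an API the package necessarily exposes):

import pytest

# Run all tests shipped with the installed nitime package,
# roughly equivalent to the old nose-based nitime.testlib.test():
pytest.main(['--pyargs', 'nitime'])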
diff --git a/nitime/tests/test_algorithms.py b/nitime/tests/test_algorithms.py
index 6210b10..ff929f6 100644
--- a/nitime/tests/test_algorithms.py
+++ b/nitime/tests/test_algorithms.py
@@ -2,10 +2,8 @@ import os
import numpy as np
import numpy.testing as npt
-import numpy.testing.decorators as dec
-from scipy.signal import signaltools
-from scipy import fftpack
+from scipy import fftpack, signal
import nitime
from nitime import algorithms as tsa
@@ -25,16 +23,16 @@ def test_scipy_resample():
for f in freq_list]
tst = np.array(a).sum(axis=0)
# interpolate to 128 Hz sampling
- t_up = signaltools.resample(tst, 128)
+ t_up = signal.resample(tst, 128)
np.testing.assert_array_almost_equal(t_up[::2], tst)
# downsample to 32 Hz
- t_dn = signaltools.resample(tst, 32)
+ t_dn = signal.resample(tst, 32)
np.testing.assert_array_almost_equal(t_dn, tst[::2])
# downsample to 48 Hz, and compute the sampling analytically for comparison
dn_samp_ana = np.array([np.sin(2 * np.pi * f * np.linspace(0, 1, 48, endpoint=False))
for f in freq_list]).sum(axis=0)
- t_dn2 = signaltools.resample(tst, 48)
+ t_dn2 = signal.resample(tst, 48)
npt.assert_array_almost_equal(t_dn2, dn_samp_ana)
@@ -116,12 +114,12 @@ def test_get_spectra():
f_periodogram = tsa.get_spectra(x, method={'this_method': 'periodogram_csd'})
f_multi_taper = tsa.get_spectra(x, method={'this_method': 'multi_taper_csd'})
- npt.assert_equal(f_welch[0].shape, (NFFT / 2 + 1,))
- npt.assert_equal(f_periodogram[0].shape, (N / 2 + 1,))
- npt.assert_equal(f_multi_taper[0].shape, (N / 2 + 1,))
+ npt.assert_equal(f_welch[0].shape, (NFFT // 2 + 1,))
+ npt.assert_equal(f_periodogram[0].shape, (N // 2 + 1,))
+ npt.assert_equal(f_multi_taper[0].shape, (N // 2 + 1,))
#Test for multi-channel data
- x = np.reshape(x, (2, x.shape[-1] / 2))
+ x = np.reshape(x, (2, x.shape[-1] // 2))
N = x.shape[-1]
#Make sure you get back the expected shape for different spectra:
@@ -146,11 +144,11 @@ def test_psd_matlab():
ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))
#Complex signal!
- ts0 = ts[1] + ts[0] * np.complex(0, 1)
+ ts0 = ts[1] + ts[0] * complex(0, 1)
NFFT = 256
Fs = 1.0
- noverlap = NFFT / 2
+ noverlap = NFFT // 2
fxx, f = mlab.psd(ts0, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
scale_by_freq=True)
@@ -161,7 +159,7 @@ def test_psd_matlab():
npt.assert_almost_equal(fxx_mlab, fxx_matlab, decimal=5)
-@dec.slow
+
def test_long_dpss_win():
""" Test that very long dpss windows can be generated (using interpolation)"""
diff --git a/nitime/tests/test_analysis.py b/nitime/tests/test_analysis.py
index 4e0d70c..3f53337 100644
--- a/nitime/tests/test_analysis.py
+++ b/nitime/tests/test_analysis.py
@@ -1,5 +1,6 @@
import numpy as np
import numpy.testing as npt
+import pytest
import nitime.timeseries as ts
import nitime.analysis as nta
@@ -70,10 +71,10 @@ def test_CorrelationAnalyzer():
C = nta.CorrelationAnalyzer(T)
# Test the symmetry: correlation(x,y)==correlation(y,x)
- npt.assert_equal(C.corrcoef[0, 1], C.corrcoef[1, 0])
+ npt.assert_almost_equal(C.corrcoef[0, 1], C.corrcoef[1, 0])
# Test the self-sameness: correlation(x,x)==1
- npt.assert_equal(C.corrcoef[0, 0], 1)
- npt.assert_equal(C.corrcoef[1, 1], 1)
+ npt.assert_almost_equal(C.corrcoef[0, 0], 1)
+ npt.assert_almost_equal(C.corrcoef[1, 1], 1)
# Test the cross-correlation:
# First the symmetry:
@@ -85,8 +86,7 @@ def test_CorrelationAnalyzer():
C.corrcoef[0, 1])
# And the auto-correlation should be equal to 1 at 0 time-lag:
- npt.assert_equal(C.xcorr_norm.data[0, 0, C.xcorr_norm.time == 0], 1)
-
+ npt.assert_almost_equal(C.xcorr_norm.data[0, 0, C.xcorr_norm.time == 0], 1)
# Does it depend on having an even number of time-points?
# make another time-series with an odd number of items:
t = np.arange(1023)
@@ -117,7 +117,7 @@ def test_EventRelatedAnalyzer():
T_signal = ts.TimeSeries(signal, sampling_rate=1)
T_events = ts.TimeSeries(events, sampling_rate=1)
- for correct_baseline in [True,False]:
+ for correct_baseline in [True, False]:
ETA = nta.EventRelatedAnalyzer(T_signal, T_events, l / (cycles * 2),
correct_baseline=correct_baseline).eta
# This should hold
@@ -176,12 +176,13 @@ def test_EventRelatedAnalyzer():
# Test that providing the analyzer with an array, instead of an Events or a
# TimeSeries object throws an error:
- npt.assert_raises(ValueError, nta.EventRelatedAnalyzer, ts2, events, 10)
+ with pytest.raises(ValueError) as e_info:
+ nta.EventRelatedAnalyzer(ts2, events, 10)
# This is not yet implemented, so this should simply throw an error, for
# now:
- npt.assert_raises(NotImplementedError,
- nta.EventRelatedAnalyzer.FIR_estimate, EA)
+ with pytest.raises(NotImplementedError) as e_info:
+ nta.EventRelatedAnalyzer.FIR_estimate(EA)
def test_HilbertAnalyzer():
"""Testing the HilbertAnalyzer (analytic signal)"""
@@ -270,6 +271,17 @@ def test_FilterAnalyzer():
npt.assert_equal(f_both.filtered_boxcar.shape, T2.shape)
npt.assert_equal(f_both.filtered_fourier.shape, T2.shape)
+ # Check that t0 is propagated to the filtered time-series
+ t0 = np.pi
+ T3 = ts.TimeSeries(np.vstack([fast, slow]), sampling_rate=np.pi, t0=t0)
+ f_both = nta.FilterAnalyzer(T3, ub=1.0, lb=0.1)
+ # These are rather basic tests:
+ npt.assert_equal(f_both.fir.t0, ts.TimeArray(t0, time_unit=T3.time_unit))
+ npt.assert_equal(f_both.iir.t0, ts.TimeArray(t0, time_unit=T3.time_unit))
+ npt.assert_equal(f_both.filtered_boxcar.t0, ts.TimeArray(t0,
+ time_unit=T3.time_unit))
+ npt.assert_equal(f_both.filtered_fourier.t0, ts.TimeArray(t0,
+ time_unit=T3.time_unit))
def test_NormalizationAnalyzer():
"""Testing the NormalizationAnalyzer """
@@ -302,3 +314,15 @@ def test_MorletWaveletAnalyzer():
npt.assert_almost_equal(np.sin(HL.phase.data[10:-10]),
np.sin(WL.phase.data[10:-10]),
decimal=0)
+
+
+def test_MTCoherenceAnalyzer():
+ """
+ Based on gh-188
+ """
+ my_signal = np.random.randn(10, int(np.round(30.02*89)))
+ multitaper_bandwidth = 0.1 # [Hz]
+ TS = ts.TimeSeries(my_signal, sampling_rate=30.02)
+ # T.metadata['roi'] = vessel_names
+ C2 = nta.MTCoherenceAnalyzer(TS, bandwidth=multitaper_bandwidth)
+ npt.assert_equal(C2.coherence.shape, (10, 10, 1337))
diff --git a/nitime/tests/test_descriptors.py b/nitime/tests/test_descriptors.py
index 5829853..3cb0b49 100644
--- a/nitime/tests/test_descriptors.py
+++ b/nitime/tests/test_descriptors.py
@@ -4,7 +4,7 @@
# Imports
#-----------------------------------------------------------------------------
-import nose.tools as nt
+import numpy.testing as nt
from nitime import descriptors as desc
@@ -32,11 +32,11 @@ class A(desc.ResetMixin):
def test():
a = A(10)
- nt.assert_false('y' in a.__dict__)
- nt.assert_equals(a.y, 5)
- nt.assert_true('y' in a.__dict__)
+ nt.assert_('y' not in a.__dict__)
+ nt.assert_(a.y == 5)
+ nt.assert_('y' in a.__dict__)
a.x = 20
- nt.assert_equals(a.y, 5)
+ nt.assert_(a.y == 5)
# Call reset and no error should be raised even though z was never accessed
a.reset()
- nt.assert_equals(a.y, 10)
+ nt.assert_(a.y == 10)
diff --git a/nitime/tests/test_lazy.py b/nitime/tests/test_lazy.py
index bbb874f..498c5fb 100644
--- a/nitime/tests/test_lazy.py
+++ b/nitime/tests/test_lazy.py
@@ -1,20 +1,23 @@
import sys
-import os
-import nitime.lazyimports as l
+import numpy as np
import numpy.testing as npt
-import numpy.testing.decorators as dec
+
+import pytest
+
+import nitime.lazyimports as l
# The next test requires nitime.lazyimports.disable_lazy_imports to have been
# set to false, otherwise the lazy import machinery is disabled and all imports
# happen at l.LazyImport calls which become equivalent to regular import
# statements
-@dec.skipif(l.disable_lazy_imports)
+@pytest.mark.skipif(l.disable_lazy_imports, reason="Lazy imports disabled")
def test_lazy():
mlab = l.LazyImport('matplotlib.mlab')
# repr for mlab should be <module 'matplotlib.mlab' will be lazily loaded>
assert 'lazily loaded' in repr(mlab)
# accessing mlab's attribute will cause an import of mlab
- npt.assert_equal(mlab.dist(1969,2011), 42.0)
+ npt.assert_(np.all(mlab.detrend_mean(np.array([1, 2, 3])) ==
+ np.array([-1., 0., 1.])))
# now mlab should be of class LoadedLazyImport an repr(mlab) should be
# <module 'matplotlib.mlab' from # '.../matplotlib/mlab.pyc>
assert 'lazily loaded' not in repr(mlab)
@@ -22,7 +25,7 @@ def test_lazy():
# A known limitation of our lazy loading implementation is that, when it it is
# enabled, reloading the module raises an ImportError, and it also does not
# actually perform a reload, as demonstrated by this test.
-@dec.skipif(l.disable_lazy_imports)
+@pytest.mark.skipif(l.disable_lazy_imports, reason="Lazy imports disabled")
def test_lazy_noreload():
"Reloading of lazy modules causes ImportError"
mod = l.LazyImport('sys')
@@ -31,10 +34,9 @@ def test_lazy_noreload():
# do not use named tuple feature for Python 2.6 compatibility
major, minor = sys.version_info[:2]
if major == 2:
- npt.assert_raises(ImportError, reload, mod)
+ with pytest.raises(ImportError) as e_info:
+ reload(mod)
elif major == 3:
- import imp
- if minor == 2:
- npt.assert_raises(ImportError, imp.reload, mod)
- elif minor == 3:
- npt.assert_raises(TypeError, imp.reload, mod)
+ import importlib
+ with pytest.raises(ImportError) as e_info:
+ importlib.reload(mod)
diff --git a/nitime/tests/test_timeseries.py b/nitime/tests/test_timeseries.py
index c9dd49c..19a8316 100644
--- a/nitime/tests/test_timeseries.py
+++ b/nitime/tests/test_timeseries.py
@@ -1,7 +1,7 @@
import numpy as np
import numpy.testing as npt
import nitime.timeseries as ts
-import nose.tools as nt
+import pytest
@@ -40,7 +40,8 @@ def test_TimeArray():
time1 = ts.TimeArray(10 ** 6)
npt.assert_equal(time1.__repr__(), '1000000.0 s')
#TimeArray can't be more than 1-d:
- nt.assert_raises(ValueError, ts.TimeArray, np.zeros((2, 2)))
+ with pytest.raises(ValueError) as e_info:
+ ts.TimeArray(np.zeros((2, 2)))
dt = ts.TimeArray(0.001, time_unit='s')
tt = ts.TimeArray([dt])
@@ -89,7 +90,7 @@ def test_TimeArray_math():
tnew = timeunits + list(range(10))
npt.assert_equal(tnew, timeunits+time1) # recall that time1 was 0-10ms
-
+
def test_TimeArray_comparison():
"Comparison with unitless quantities should convert to TimeArray units"
@@ -123,7 +124,7 @@ def test_TimeArray_init_list():
tl = [t]
ta = ts.TimeArray(t, time_unit='s')
tla = ts.TimeArray(tl, time_unit='s')
- nt.assert_equal(ta, tla)
+ npt.assert_(ta, tla)
def test_TimeArray_repr():
@@ -156,8 +157,7 @@ def test_TimeArray_copyflag():
def test_TimeArray_new():
for unit in ['ns', 'ms', 's', None]:
- for flag, assertion in [(True, nt.assert_not_equal),
- (False, nt.assert_equal)]:
+ for flag in [True, False]:
#list -doesn't make sense to set copy=True
time2 = ts.TimeArray(list(range(5)), time_unit=unit, copy=True)
#numpy array (float) - doesn't make sense to set copy=True
@@ -172,7 +172,10 @@ def test_TimeArray_new():
npt.assert_equal(time2, time2f)
npt.assert_equal(time2, time3)
time3[0] += 100
- assertion(time2[0], time3[0])
+ if flag:
+ npt.assert_(time2[0] != time3[0])
+ else:
+ npt.assert_(time2[0] == time3[0])
npt.assert_equal(time2[1:], time3[1:])
npt.assert_equal(time4, time5)
@@ -184,7 +187,7 @@ def test_TimeArray_bool():
bool_arr = np.ones(time1.shape, dtype=bool)
npt.assert_equal(time1, time2)
npt.assert_equal(bool_arr, time1 == time2)
- nt.assert_not_equal(type(time1 == time2), ts.TimeArray)
+ npt.assert_(type(time1 == time2) is not ts.TimeArray)
def test_TimeArray_convert_unit():
@@ -330,7 +333,7 @@ def test_UniformTime_index_at():
def test_TimeArray_getset():
t1 = ts.TimeSeries(data = np.random.rand(2, 3, 4), sampling_rate=1)
npt.assert_equal(t1[0],t1.data[...,0])
-
+
@@ -385,16 +388,16 @@ def test_UniformTime():
#This should raise a value error, because the duration is shorter than
#the sampling_interval:
- npt.assert_raises(ValueError,
- ts.UniformTime,
- dict(sampling_interval=10, duration=1))
+ with pytest.raises(ValueError) as e_info:
+ ts.UniformTime(dict(sampling_interval=10, duration=1))
#Time objects can be initialized with other time objects setting the
#duration, sampling_interval and sampling_rate:
a = ts.UniformTime(length=1, sampling_rate=1)
- npt.assert_raises(ValueError, ts.UniformTime, dict(data=a,
- sampling_rate=10, sampling_interval=.1))
+ with pytest.raises(ValueError) as e_info:
+ ts.UniformTime(dict(data=a, sampling_rate=10, sampling_interval=.1))
+
b = ts.UniformTime(duration=2 * a.sampling_interval,
sampling_rate=2 * a.sampling_rate)
@@ -488,10 +491,10 @@ def test_TimeSeries():
t1 = ts.UniformTime(length=8, sampling_rate=2)
#duration is the same, but we're downsampling to 1Hz
tseries1 = ts.TimeSeries(data=[1, 2, 3, 4], time=t1, sampling_rate=1)
- #If you didn't explicitely provide the rate you want to downsample to, that
+ #If you didn't explicitly provide the rate you want to downsample to, that
#is an error:
- npt.assert_raises(ValueError, ts.TimeSeries, dict(data=[1, 2, 3, 4],
- time=t1))
+ with pytest.raises(ValueError) as e_info:
+ ts.TimeSeries(dict(data=[1, 2, 3, 4], time=t1))
tseries2 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1)
tseries3 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1000,
@@ -528,10 +531,10 @@ def test_TimeSeries():
data = [1, 2, 3, 4]
#If the data is not the right length, that should throw an error:
- npt.assert_raises(ValueError,
- ts.TimeSeries, dict(data=data, time=t))
+ with pytest.raises(ValueError) as e_info:
+ ts.TimeSeries(dict(data=data, time=t))
- # test basic arithmetics wiht TimeSeries
+ # test basic arithmetics with TimeSeries
tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_rate=1)
tseries2 = tseries1 + 1
npt.assert_equal(tseries1.data + 1, tseries2.data)
@@ -542,10 +545,24 @@ def test_TimeSeries():
tseries2 = tseries1 * 2
npt.assert_equal(tseries1.data * 2, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
- tseries2 /= 2
+ tseries2 = tseries2 / 2
npt.assert_equal(tseries1.data, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
+ tseries_nd1 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
+ tseries_nd2 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
+ npt.assert_equal((tseries_nd1 + tseries_nd2).data,
+ tseries_nd1.data + tseries_nd2.data)
+
+ npt.assert_equal((tseries_nd1 - tseries_nd2).data,
+ tseries_nd1.data - tseries_nd2.data)
+
+ npt.assert_equal((tseries_nd1 * tseries_nd2).data,
+ tseries_nd1.data * tseries_nd2.data)
+
+ npt.assert_equal((tseries_nd1 / tseries_nd2).data,
+ tseries_nd1.data / tseries_nd2.data)
+
def test_TimeSeries_repr():
"""
@@ -647,7 +664,8 @@ def test_Epochs():
# raise error, though future jagged array implementation could go here)
ejag = ts.Epochs([0, 10], [10, 40], time_unit=t.time_unit)
# next line is the same as t[ejag]
- npt.assert_raises(ValueError, t.__getitem__, ejag)
+ with pytest.raises(ValueError) as e_info:
+ t.__getitem__(ejag)
# if an epoch lies entirely between samples in the timeseries,
# return an empty array
@@ -657,12 +675,14 @@ def test_Epochs():
e1ms_outofrange = ts.Epochs(200, 300, time_unit=t.time_unit)
# assert that with the epoch moved outside of the time range of our
# data, slicing with the epoch now yields an empty array
- npt.assert_raises(ValueError, t.during, dict(e=e1ms_outofrange))
+ with pytest.raises(ValueError) as e_info:
+ t.during(dict(e=e1ms_outofrange))
# the sample timeseries are all shorter than a day, so this should
# raise an error (instead of padding, or returning a shorter than
# expected array.
- npt.assert_raises(ValueError, t.during, dict(e=e1d))
+ with pytest.raises(ValueError) as e_info:
+ t.during(dict(e=e1d))
def test_basic_slicing():
t = ts.TimeArray(list(range(4)))
@@ -684,7 +704,8 @@ def test_basic_slicing():
def test_Events():
# time has to be one-dimensional
- nt.assert_raises(ValueError, ts.Events, np.zeros((2, 2)))
+ with pytest.raises(ValueError) as e_info:
+ ts.Events(np.zeros((2, 2)))
t = ts.TimeArray([1, 2, 3], time_unit='ms')
x = [1, 2, 3]
@@ -704,13 +725,12 @@ def test_Events():
indices=[i0, i1])
# Note that the length of indices and labels has to be identical:
- nt.assert_raises(ValueError, ts.Events, t, time_unit=unit,
- labels=['trial',
- 'other'],
- indices=[i0])# Only
- # one of
- # the
- # indices!
+ with pytest.raises(ValueError) as e_info:
+ ts.Events(t, time_unit=unit,
+                  labels=['trial', 'other'], indices=[i0])  # Only one of the indices!
# make sure the time is retained
npt.assert_equal(ev1.time, t)
@@ -797,7 +817,8 @@ def test_masked_array_timeseries():
# make sure regular arrays passed don't become masked
notmasked = np.array([0,np.nan,2])
t2 = ts.TimeSeries(notmasked, sampling_interval=1)
- npt.assert_raises(AttributeError, t2.data.__getattribute__,'mask')
+ with pytest.raises(AttributeError) as e_info:
+ t2.data.__getattribute__('mask')
def test_masked_array_events():
# make sure masked arrays passed in stay as masked arrays
@@ -808,7 +829,8 @@ def test_masked_array_events():
# make sure regular arrays passed don't become masked
notmasked = np.array([0,np.nan,2])
e2 = ts.Events([1,2,3], d=notmasked)
- npt.assert_raises(AttributeError, e2.data['d'].__getattribute__,'mask')
+ with pytest.raises(AttributeError) as e_info:
+ e2.data['d'].__getattribute__('mask')
def test_event_subclass_slicing():
"Subclassing Events should preserve the subclass after slicing"
@@ -860,7 +882,8 @@ def test_UniformTime_preserves_uniformity():
utime = ts.UniformTime(t0=0, length=10, sampling_rate=1)
def assign_to_one_element_of(t): t[0]=42
- nt.assert_raises(ValueError, assign_to_one_element_of,utime)
+ with pytest.raises(ValueError) as e_info:
+ assign_to_one_element_of(utime)
# same as utime, but starting 10s later
utime10 = ts.UniformTime(t0=10, length=10, sampling_rate=1)
@@ -880,7 +903,8 @@ def test_UniformTime_preserves_uniformity():
nonuniform = np.concatenate((list(range(2)),list(range(3)), list(range(5))))
def iadd_nonuniform(t): t+=nonuniform
- nt.assert_raises(ValueError, iadd_nonuniform, utime)
+ with pytest.raises(ValueError) as e_info:
+ iadd_nonuniform(utime)
def test_index_int64():
"indexing with int64 should still return a valid TimeArray"
@@ -891,16 +915,18 @@ def test_index_int64():
assert b[0] == b[np.int32(0)]
assert repr(b[0]) == repr(b[np.int32(0)])
+
def test_timearray_math_functions():
"Calling TimeArray.min() .max(), mean() should return TimeArrays"
- a = np.arange(2,11)
- for f in ['min','max','mean', 'ptp', 'sum']:
+ a = np.arange(2, 11)
+ for f in ['min', 'max', 'mean', 'ptp', 'sum']:
for tu in ['s', 'ms', 'ps', 'D']:
b = ts.TimeArray(a, time_unit=tu)
- assert getattr(b, f)().__class__ == ts.TimeArray
- assert getattr(b, f)().time_unit== b.time_unit
+ npt.assert_(getattr(b, f)().__class__ == ts.TimeArray)
+ npt.assert_(getattr(b, f)().time_unit == b.time_unit)
# comparison with unitless should convert to the TimeArray's units
- assert getattr(b, f)() == getattr(a,f)()
+ npt.assert_(getattr(b, f)() == getattr(a, f)())
+
def test_timearray_var_prod():
"""
@@ -908,5 +934,7 @@ def test_timearray_var_prod():
implemented and raise an error
"""
a = ts.TimeArray(list(range(10)))
- npt.assert_raises(NotImplementedError, a.var)
- npt.assert_raises(NotImplementedError, a.prod)
+ with pytest.raises(NotImplementedError) as e_info:
+ a.var()
+ with pytest.raises(NotImplementedError) as e_info:
+ a.prod()
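
The recurring pattern in these test updates replaces nose-style `npt.assert_raises(Error, callable, args)` with the `pytest.raises` context manager. A minimal sketch of the idiom, reusing the `TimeArray.var()` case from the hunk above (assumes nitime and pytest are importable):

    import pytest
    import nitime.timeseries as ts

    def test_var_not_implemented():
        a = ts.TimeArray(list(range(10)))
        # the test passes only if the block raises the named exception
        with pytest.raises(NotImplementedError):
            a.var()
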
diff --git a/nitime/tests/test_utils.py b/nitime/tests/test_utils.py
index 2626f8a..7770227 100644
--- a/nitime/tests/test_utils.py
+++ b/nitime/tests/test_utils.py
@@ -1,6 +1,5 @@
import numpy as np
import numpy.testing as npt
-import nose.tools as nt
from nitime import utils
import nitime.algorithms as alg
@@ -14,13 +13,13 @@ def test_zscore():
z = utils.zscore(x)
npt.assert_equal(x.shape, z.shape)
- #Default axis is -1
+ # Default axis is -1
npt.assert_equal(utils.zscore(x), np.array([[-1., -1., 1., 1.],
- [-1., -1., 1., 1.]]))
+ [-1., -1., 1., 1.]]))
- #Test other axis:
+ # Test other axis:
npt.assert_equal(utils.zscore(x, 0), np.array([[-1., -1., -1., -1.],
- [1., 1., 1., 1.]]))
+ [1., 1., 1., 1.]]))
def test_percent_change():
@@ -77,7 +76,7 @@ def test_tridi_inverse_iteration():
ab[1], sup_diag, w[j], x0=np.sin((j+1)*t)
)
b = A*e
- nt.assert_true(
+ npt.assert_(
np.linalg.norm(np.abs(b) - np.abs(w[j]*e)) < 1e-8,
'Inverse iteration eigenvector solution is inconsistent with '\
'given eigenvalue'
@@ -103,7 +102,7 @@ def ref_crosscov(x, y, all_lags=True):
sxy = np.correlate(x, y, mode='full') / lx
if all_lags:
return sxy
- c_idx = pad_len / 2
+ c_idx = pad_len // 2
return sxy[c_idx:]
@@ -117,7 +116,7 @@ def test_crosscov():
sxy_ref = ref_crosscov(ar_seq1, ar_seq2, all_lags=all_lags)
err = sxy_ref - sxy
mse = np.dot(err, err) / N
- nt.assert_true(mse < 1e-12, \
+ npt.assert_(mse < 1e-12, \
'Large mean square error w.r.t. reference cross covariance')
@@ -125,10 +124,10 @@ def test_autocorr():
N = 128
ar_seq, _, _ = utils.ar_generator(N=N)
rxx = utils.autocorr(ar_seq)
- nt.assert_true(rxx[0] == rxx.max(), \
+ npt.assert_(rxx[0] == rxx.max(), \
'Zero lag autocorrelation is not maximum autocorrelation')
rxx = utils.autocorr(ar_seq, all_lags=True)
- nt.assert_true(rxx[127] == rxx.max(), \
+ npt.assert_(rxx[127] == rxx.max(), \
'Zero lag autocorrelation is not maximum autocorrelation')
def test_information_criteria():
@@ -197,9 +196,7 @@ def test_information_criteria():
# We do not test this for AIC/AICc, because these sometimes do not minimize
# (see Ding and Bressler)
- # nt.assert_equal(np.argmin(AIC), 2)
- # nt.assert_equal(np.argmin(AICc), 2)
- nt.assert_equal(np.argmin(BIC), 2)
+ npt.assert_equal(np.argmin(BIC), 2)
def test_multi_intersect():
@@ -219,7 +216,7 @@ def test_zero_pad():
"""
# Freely assume that time is the last dimension:
ts1 = np.empty((64, 64, 35, 32))
- NFFT = 64
+ NFFT = 64
zp1 = utils.zero_pad(ts1, NFFT)
npt.assert_equal(zp1.shape[-1], NFFT)
@@ -227,7 +224,7 @@ def test_zero_pad():
ts2 = np.empty(64)
zp2 = utils.zero_pad(ts2, NFFT)
npt.assert_equal(zp2.shape[-1], NFFT)
-
+
def test_detect_lines():
"""
@@ -258,7 +255,7 @@ def test_detect_lines():
f, b = utils.detect_lines(sig, (NW, 2*NW), low_bias=True, NFFT=2**fft_pow)
h_est = 2*(b[:,None]*np.exp(2j*np.pi*tx*f[:,None])).real
- nt.assert_true(
+ npt.assert_(
len(f) == 3, 'The wrong number of harmonic components were detected'
)
@@ -273,12 +270,12 @@ def test_detect_lines():
# FFT bin detections should be exact
npt.assert_equal(lines, f)
# amplitude estimation should be pretty good
- nt.assert_true(amp_err < 1e-4, 'Harmonic amplitude was poorly estimated')
+ npt.assert_(amp_err < 1e-4, 'Harmonic amplitude was poorly estimated')
# phase estimation should be decent
- nt.assert_true(phs_err < 1e-3, 'Harmonic phase was poorly estimated')
+ npt.assert_(phs_err < 1e-3, 'Harmonic phase was poorly estimated')
# the error relative to the noise power should be below 1
rel_mse = np.mean(err**2)/nz_sig**2
- nt.assert_true(
+ npt.assert_(
rel_mse < 1,
'The error in detected harmonic components is too high relative to '\
'the noise level: %1.2e'%rel_mse
@@ -297,12 +294,11 @@ def test_detect_lines_2dmode():
lines = utils.detect_lines(sig2d, (4, 8), low_bias=True, NFFT=2**12)
- nt.assert_true(len(lines)==3, 'Detect lines failed multi-sequence mode')
+ npt.assert_(len(lines)==3, 'Detect lines failed multi-sequence mode')
consistent1 = (lines[0][0] == lines[1][0]).all() and \
(lines[1][0] == lines[2][0]).all()
consistent2 = (lines[0][1] == lines[1][1]).all() and \
(lines[1][1] == lines[2][1]).all()
- nt.assert_true(consistent1 and consistent2, 'Inconsistent results')
-
+ npt.assert_(consistent1 and consistent2, 'Inconsistent results')
diff --git a/nitime/tests/test_viz.py b/nitime/tests/test_viz.py
index 8406e4f..2cba9c4 100644
--- a/nitime/tests/test_viz.py
+++ b/nitime/tests/test_viz.py
@@ -6,19 +6,23 @@ Smoke testing of the viz module.
import numpy as np
import numpy.testing as npt
+import pytest
from nitime.timeseries import TimeSeries
from nitime.analysis import CorrelationAnalyzer
from nitime.viz import drawmatrix_channels, drawgraph_channels, plot_xcorr
try:
- import nx
+ import networkx
no_networkx = False
no_networkx_msg = ''
except ImportError as e:
no_networkx = True
no_networkx_msg = e.args[0]
+import os
+is_ci = "CI" in os.environ
+
roi_names = ['a','b','c','d','e','f','g','h','i','j']
data = np.random.rand(10,1024)
@@ -28,9 +32,11 @@ T.metadata['roi'] = roi_names
#Initialize the correlation analyzer
C = CorrelationAnalyzer(T)
+@pytest.mark.skipif(is_ci, reason="Running on a CI server")
def test_drawmatrix_channels():
fig01 = drawmatrix_channels(C.corrcoef, roi_names, size=[10., 10.], color_anchor=0)
+@pytest.mark.skipif(is_ci, reason="Running on a CI server")
def test_plot_xcorr():
xc = C.xcorr_norm
@@ -40,6 +46,7 @@ def test_plot_xcorr():
line_labels=['a', 'b'])
-@npt.dec.skipif(no_networkx, no_networkx_msg)
+@pytest.mark.skipif(is_ci, reason="Running on a CI server")
+@pytest.mark.skipif(no_networkx, reason=no_networkx_msg)
def test_drawgraph_channels():
fig04 = drawgraph_channels(C.corrcoef, roi_names)
diff --git a/nitime/timeseries.py b/nitime/timeseries.py
index 86710ec..4fd44a9 100644
--- a/nitime/timeseries.py
+++ b/nitime/timeseries.py
@@ -32,7 +32,6 @@ import numpy as np
# Our own
from nitime import descriptors as desc
-import nitime.six as six
#-----------------------------------------------------------------------------
# Module globals
@@ -95,7 +94,7 @@ def get_time_unit(obj):
except TypeError:
return None
else:
- return get_time_unit(six.advance_iterator(it))
+ return get_time_unit(next(it))
class TimeArray(np.ndarray, TimeInterface):
@@ -255,93 +254,89 @@ class TimeArray(np.ndarray, TimeInterface):
val *= self._conversion_factor
return val
- def __add__(self,val):
+ def __add__(self, val):
val = self._convert_if_needed(val)
- return np.ndarray.__add__(self,val)
+ return np.ndarray.__add__(self, val)
- def __sub__(self,val):
+ def __sub__(self, val):
val = self._convert_if_needed(val)
- return np.ndarray.__sub__(self,val)
+ return np.ndarray.__sub__(self, val)
- def __radd__(self,val):
+ def __radd__(self, val):
val = self._convert_if_needed(val)
- return np.ndarray.__radd__(self,val)
+ return np.ndarray.__radd__(self, val)
- def __rsub__(self,val):
+ def __rsub__(self, val):
val = self._convert_if_needed(val)
- return np.ndarray.__rsub__(self,val)
+ return np.ndarray.__rsub__(self, val)
- def __lt__(self,val):
+ def __lt__(self, val):
val = self._convert_if_needed(val)
- return np.ndarray.__lt__(self,val)
+ return np.ndarray.__lt__(self, val)
- def __gt__(self,val):
+ def __gt__(self, val):
val = self._convert_if_needed(val)
- return np.ndarray.__gt__(self,val)
+ return np.ndarray.__gt__(self, val)
- def __le__(self,val):
+ def __le__(self, val):
val = self._convert_if_needed(val)
- return np.ndarray.__le__(self,val)
+ return np.ndarray.__le__(self, val)
- def __ge__(self,val):
+ def __ge__(self, val):
val = self._convert_if_needed(val)
return np.ndarray.__ge__(self,val)
- def __eq__(self,val):
+ def __eq__(self, val):
val = self._convert_if_needed(val)
return np.ndarray.__eq__(self,val)
-
- def min(self, *args,**kwargs):
- ret = TimeArray(np.ndarray.min(self, *args,**kwargs),
+
+ def min(self, *args, **kwargs):
+ ret = TimeArray(np.ndarray.min(self, *args, **kwargs),
time_unit=base_unit)
ret.convert_unit(self.time_unit)
return ret
def max(self, *args,**kwargs):
- ret = TimeArray(np.ndarray.max(self, *args,**kwargs),
+ ret = TimeArray(np.ndarray.max(self, *args, **kwargs),
time_unit=base_unit)
ret.convert_unit(self.time_unit)
return ret
- def mean(self, *args,**kwargs):
- ret = TimeArray(np.ndarray.mean(self, *args,**kwargs),
+ def mean(self, *args, **kwargs):
+ ret = TimeArray(np.ndarray.mean(self, *args, **kwargs),
time_unit=base_unit)
ret.convert_unit(self.time_unit)
return ret
- def ptp(self, *args,**kwargs):
- ret = TimeArray(np.ndarray.ptp(self, *args,**kwargs),
- time_unit=base_unit)
+ def ptp(self, *args, **kwargs):
+ ret = TimeArray(np.ndarray.ptp(self, *args, **kwargs),
+ time_unit=base_unit)
ret.convert_unit(self.time_unit)
return ret
def sum(self, *args,**kwargs):
- ret = TimeArray(np.ndarray.sum(self, *args,**kwargs),
- time_unit=base_unit)
+ ret = TimeArray(np.ndarray.sum(self, *args, **kwargs),
+ time_unit=base_unit)
ret.convert_unit(self.time_unit)
return ret
-
+
def prod(self, *args, **kwargs):
e_s = "Product computation changes TimeArray units"
raise NotImplementedError(e_s)
-
-
+
def var(self, *args, **kwargs):
e_s = "Variance computation changes TimeArray units"
raise NotImplementedError(e_s)
-
def std(self, *args, **kwargs):
"""Returns the standard deviation of this TimeArray (with time units)
-
for detailed information, see numpy.std()
"""
- ret = TimeArray(np.ndarray.std(self, *args,**kwargs),
- time_unit=base_unit)
+ ret = TimeArray(np.ndarray.std(self, *args, **kwargs),
+ time_unit=base_unit)
ret.convert_unit(self.time_unit)
return ret
-
def index_at(self, t, tol=None, mode='closest'):
""" Returns the integer indices that corresponds to the time t
@@ -907,7 +902,8 @@ class Frequency(float):
def __repr__(self):
- return str(self) + ' Hz'
+ return str(float(self)) + ' Hz'
+
def to_period(self, time_unit=base_unit):
"""Convert the value of a frequency to the corresponding period
@@ -985,43 +981,43 @@ class TimeSeriesBase(object):
# add some methods that implement arithmetic on the timeseries data
def __add__(self, other):
out = self.copy()
- out.data = out.data.__add__(other)
+ out.data = out.data.__add__(np.asanyarray(other).T)
return out
def __sub__(self, other):
out = self.copy()
- out.data = out.data.__sub__(other)
+ out.data = out.data.__sub__(np.asanyarray(other).T)
return out
def __mul__(self, other):
out = self.copy()
- out.data = out.data.__mul__(other)
+ out.data = out.data.__mul__(np.asanyarray(other).T)
return out
def __div__(self, other):
out = self.copy()
- out.data = out.data.__div__(other)
+ out.data = out.data.__truediv__(np.asanyarray(other).T)
return out
-
- __truediv__ = __div__ # for py3k
+
+ __truediv__ = __div__ # for py3k
def __iadd__(self, other):
- self.data.__iadd__(other)
+ self.data.__iadd__(np.asanyarray(other).T)
return self
def __isub__(self, other):
- self.data.__isub__(other)
+ self.data.__isub__(np.asanyarray(other).T)
return self
def __imul__(self, other):
- self.data.__imul__(other)
+ self.data.__imul__(np.asanyarray(other).T)
return self
def __idiv__(self, other):
- self.data.__itruediv__(other)
+ self.data.__itruediv__(np.asanyarray(other).T)
return self
- __itruediv__ = __idiv__ # for py3k
+ __itruediv__ = __idiv__ # for py3k
class TimeSeries(TimeSeriesBase):
"""Represent data collected at uniform intervals.
@@ -1077,7 +1073,7 @@ class TimeSeries(TimeSeriesBase):
t0 : float
If you provide a sampling rate, you can optionally also provide a
starting time.
- time
+ time
Instead of sampling rate, you can explicitly provide an object of
class UniformTime. Note that you can still also provide a different
sampling_rate/sampling_interval/duration to take the place of the
@@ -1155,8 +1151,8 @@ class TimeSeries(TimeSeriesBase):
duration = time.duration
length = time.shape[-1]
#If changing the duration requires a change to the
- #sampling_rate, make sure that this was explicitely required by
- #the user - if the user did not explicitely set the
+ #sampling_rate, make sure that this was explicitly required by
+ #the user - if the user did not explicitly set the
#sampling_rate, or it is inconsistent, throw an error:
data_len = np.array(data).shape[-1]
@@ -1494,7 +1490,7 @@ class Events(TimeInterface):
Parameters
----------
time : array or TimeArray
- The times at which events occured
+ The times at which events occurred
labels : array, optional
@@ -1521,7 +1517,7 @@ class Events(TimeInterface):
# Ensure that the dict of data values has a known, uniform structure:
# all values must be arrays, with at least one dimension.
new_data = {}
- for k, v in six.iteritems(data):
+ for k, v in data.items():
if np.iterable(v):
v = np.asanyarray(v)
else:
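
With the `TimeSeriesBase` arithmetic operators earlier in this file's diff now routing `other` through `np.asanyarray(other).T`, a same-shaped `TimeSeries` can appear on the right-hand side as well as a scalar, as the new test_timeseries.py assertions in this patch exercise. A small sketch mirroring those assertions (assumes nitime is importable):

    import numpy as np
    import numpy.testing as npt
    import nitime.timeseries as ts

    t1 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
    t2 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)

    # scalar arithmetic returns a new TimeSeries on the same time axis
    npt.assert_equal((t1 + 1).data, t1.data + 1)
    # element-wise arithmetic between two same-shaped TimeSeries
    npt.assert_equal((t1 + t2).data, t1.data + t2.data)
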
diff --git a/nitime/utils.py b/nitime/utils.py
index ff5b6cc..b0b9b68 100644
--- a/nitime/utils.py
+++ b/nitime/utils.py
@@ -4,10 +4,14 @@
from __future__ import print_function
import warnings
import numpy as np
+import scipy.ndimage as ndimage
+
from nitime.lazy import scipy_linalg as linalg
from nitime.lazy import scipy_signal as sig
from nitime.lazy import scipy_fftpack as fftpack
from nitime.lazy import scipy_signal_signaltools as signaltools
+from nitime.lazy import scipy_stats_distributions as dists
+from nitime.lazy import scipy_interpolate as interpolate
#-----------------------------------------------------------------------------
@@ -109,20 +113,6 @@ def ar_generator(N=512, sigma=1., coefs=None, drop_transients=0, v=None):
Parameters
----------
- N : int
- sequence length
- sigma : float
- power of the white noise driving process
- coefs : sequence
- AR coefficients for k = 1, 2, ..., P
- drop_transients : int
- number of initial IIR filter transient terms to drop
- v : ndarray
- custom noise process
-
- Parameters
- ----------
-
N : float
The number of points in the AR process generated. Default: 512
sigma : float
@@ -283,7 +273,7 @@ def expected_jk_variance(K):
asymptotic expansion of the trigamma function derived in
[Thompson_1994]
- Paramters
+ Parameters
---------
K : int
@@ -503,7 +493,7 @@ def adaptive_weights(yk, eigvals, sides='onesided', max_iter=150):
""")
# we'll hope this is a correct length for L
N = yk.shape[-1]
- L = N / 2 + 1 if sides == 'onesided' else N
+ L = N // 2 + 1 if sides == 'onesided' else N
return (np.multiply.outer(np.sqrt(eigvals), np.ones(L)), 2 * K)
rt_eig = np.sqrt(eigvals)
@@ -575,6 +565,204 @@ def adaptive_weights(yk, eigvals, sides='onesided', max_iter=150):
nu = 2 * (weights ** 2).sum(axis=-2)
return weights, nu
+
+def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind='linear'):
+ """
+ Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
+ for a given frequency-spacing multiple NW and sequence length N.
+
+ Parameters
+ ----------
+ N : int
+ sequence length
+ NW : float, unitless
+ standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
+ but with dt taken as 1
+ Kmax : int
+ number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
+ interp_from : int (optional)
+ The dpss can be calculated using interpolation from a set of dpss
+ with the same NW and Kmax, but shorter N. This is the length of this
+ shorter set of dpss windows.
+ interp_kind : str (optional)
+ This input variable is passed to scipy.interpolate.interp1d and
+ specifies the kind of interpolation as a string ('linear', 'nearest',
+        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the
+ order of the spline interpolator to use.
+
+
+ Returns
+ -------
+ v, e : tuple,
+ v is an array of DPSS windows shaped (Kmax, N)
+ e are the eigenvalues
+
+ Notes
+ -----
+ Tridiagonal form of DPSS calculation from:
+
+ Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
+ uncertainty V: The discrete case. Bell System Technical Journal,
+    Volume 57 (1978), 1371-1430
+ """
+ Kmax = int(Kmax)
+ W = float(NW) / N
+ nidx = np.arange(N, dtype='d')
+
+ # In this case, we create the dpss windows of the smaller size
+ # (interp_from) and then interpolate to the larger size (N)
+ if interp_from is not None:
+ if interp_from > N:
+ e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
+ e_s += 'and N is: %s. ' % N
+ e_s += 'Please enter interp_from smaller than N.'
+ raise ValueError(e_s)
+ dpss = []
+ d, e = dpss_windows(interp_from, NW, Kmax)
+ for this_d in d:
+ x = np.arange(this_d.shape[-1])
+ I = interpolate.interp1d(x, this_d, kind=interp_kind)
+ d_temp = I(np.linspace(0, this_d.shape[-1] - 1, N, endpoint=False))
+
+ # Rescale:
+ d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
+
+ dpss.append(d_temp)
+
+ dpss = np.array(dpss)
+
+ else:
+ # here we want to set up an optimization problem to find a sequence
+ # whose energy is maximally concentrated within band [-W,W].
+ # Thus, the measure lambda(T,W) is the ratio between the energy within
+ # that band, and the total energy. This leads to the eigen-system
+ # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
+ # eigenvalue is the sequence with maximally concentrated energy. The
+ # collection of eigenvectors of this system are called Slepian
+ # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
+ # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
+ # concentration
+ # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
+
+ # Here I set up an alternative symmetric tri-diagonal eigenvalue
+ # problem such that
+ # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
+ # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
+ # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
+ # [see Percival and Walden, 1993]
+ diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
+ off_diag = np.zeros_like(nidx)
+ off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
+ # put the diagonals in LAPACK "packed" storage
+ ab = np.zeros((2, N), 'd')
+ ab[1] = diagonal
+ ab[0, 1:] = off_diag[:-1]
+ # only calculate the highest Kmax eigenvalues
+ w = linalg.eigvals_banded(ab, select='i',
+ select_range=(N - Kmax, N - 1))
+ w = w[::-1]
+
+ # find the corresponding eigenvectors via inverse iteration
+ t = np.linspace(0, np.pi, N)
+ dpss = np.zeros((Kmax, N), 'd')
+ for k in range(Kmax):
+ dpss[k] = tridi_inverse_iteration(
+ diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)
+ )
+
+ # By convention (Percival and Walden, 1993 pg 379)
+ # * symmetric tapers (k=0,2,4,...) should have a positive average.
+ # * antisymmetric tapers should begin with a positive lobe
+ fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
+ for i, f in enumerate(fix_symmetric):
+ if f:
+ dpss[2 * i] *= -1
+ # rather than test the sign of one point, test the sign of the
+ # linear slope up to the first (largest) peak
+ pk = np.argmax(np.abs(dpss[1::2, :N//2]), axis=1)
+ for i, p in enumerate(pk):
+ if np.sum(dpss[2 * i + 1, :p]) < 0:
+ dpss[2 * i + 1] *= -1
+
+ # Now find the eigenvalues of the original spectral concentration problem
+ # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
+ dpss_rxx = autocorr(dpss) * N
+ r = 4 * W * np.sinc(2 * W * nidx)
+ r[0] = 2 * W
+ eigvals = np.dot(dpss_rxx, r)
+
+ return dpss, eigvals
+
+
+def tapered_spectra(s, tapers, NFFT=None, low_bias=True):
+ """
+ Compute the tapered spectra of the rows of s.
+
+ Parameters
+ ----------
+
+ s : ndarray, (n_arr, n_pts)
+ An array whose rows are timeseries.
+
+ tapers : ndarray or container
+ Either the precomputed DPSS tapers, or the pair of parameters
+ (NW, K) needed to compute K tapers of length n_pts.
+
+ NFFT : int
+ Number of FFT bins to compute
+
+ low_bias : Boolean
+       If computing DPSS, automatically select tapers corresponding to
+ > 90% energy concentration.
+
+ Returns
+ -------
+
+ t_spectra : ndarray, shaped (n_arr, K, NFFT)
+ The FFT of the tapered sequences in s. First dimension is squeezed
+ out if n_arr is 1.
+ eigvals : ndarray
+ The eigenvalues are also returned if DPSS are calculated here.
+
+ """
+ N = s.shape[-1]
+ # XXX: don't allow NFFT < N -- not every implementation is so restrictive!
+ if NFFT is None or NFFT < N:
+ NFFT = N
+ rest_of_dims = s.shape[:-1]
+
+ s = s.reshape(-1, N)
+ # de-mean this sucker
+ s = remove_bias(s, axis=-1)
+
+ if not isinstance(tapers, np.ndarray):
+ # then tapers is (NW, K)
+ args = (N,) + tuple(tapers)
+ dpss, eigvals = dpss_windows(*args)
+ if low_bias:
+ keepers = (eigvals > 0.9)
+ dpss = dpss[keepers]
+ eigvals = eigvals[keepers]
+ tapers = dpss
+ else:
+ eigvals = None
+ K = tapers.shape[0]
+ sig_sl = [slice(None)] * len(s.shape)
+ sig_sl.insert(len(s.shape) - 1, np.newaxis)
+
+ # tapered.shape is (M, Kmax, N)
+ tapered = s[tuple(sig_sl)] * tapers
+
+ # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking
+ # results of real-valued FFT eats up memory
+ t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)
+ t_spectra.shape = rest_of_dims + (K, NFFT)
+ if eigvals is None:
+ return t_spectra
+ return t_spectra, eigvals
+
+
+
def detect_lines(s, tapers, p=None, **taper_kws):
"""
Detect the presence of line spectra in s using the F-test
@@ -596,7 +784,7 @@ def detect_lines(s, tapers, p=None, **taper_kws):
a locally white spectrum, there is a threshold such that
there is a (1-p)% chance of a line amplitude being larger
than that threshold. Only detect lines with amplitude greater
- than this threshold. The default is 1/N, to control for false
+ than this threshold. The default is 1/NFFT, to control for false
positives.
taper_kws
@@ -616,9 +804,6 @@ def detect_lines(s, tapers, p=None, **taper_kws):
sn = sn.sum(axis=0)
"""
- from nitime.algorithms import tapered_spectra, dpss_windows
- import scipy.stats.distributions as dists
- import scipy.ndimage as ndimage
N = s.shape[-1]
# Some boiler-plate --
# 1) set up tapers
@@ -634,7 +819,7 @@ def detect_lines(s, tapers, p=None, **taper_kws):
# spectra is (n_arr, K, nfft)
spectra = tapered_spectra(s, tapers, **taper_kws)
nfft = spectra.shape[-1]
- spectra = spectra[...,:nfft/2 + 1]
+ spectra = spectra[..., :nfft//2 + 1]
# Set up some data for the following calculations --
# get the DC component of the taper spectra
@@ -801,7 +986,7 @@ def remove_bias(x, axis):
def crosscov(x, y, axis=-1, all_lags=False, debias=True, normalize=True):
- """Returns the crosscovariance sequence between two ndarrays.
+ r"""Returns the crosscovariance sequence between two ndarrays.
This is performed by calling fftconvolve on x, y[::-1]
Parameters
@@ -837,8 +1022,8 @@ def crosscov(x, y, axis=-1, all_lags=False, debias=True, normalize=True):
Also note that this routine is the workhorse for all auto/cross/cov/corr
functions.
-
"""
+
if x.shape[axis] != y.shape[axis]:
raise ValueError(
'crosscov() only works on same-length sequences for now'
@@ -859,7 +1044,7 @@ def crosscov(x, y, axis=-1, all_lags=False, debias=True, normalize=True):
def crosscorr(x, y, **kwargs):
- """
+ r"""
Returns the crosscorrelation sequence between two ndarrays.
This is performed by calling fftconvolve on x, y[::-1]
@@ -899,7 +1084,7 @@ def crosscorr(x, y, **kwargs):
def autocov(x, **kwargs):
- """Returns the autocovariance of signal s at all lags.
+ r"""Returns the autocovariance of signal s at all lags.
Parameters
----------
@@ -938,7 +1123,7 @@ def autocov(x, **kwargs):
def autocorr(x, **kwargs):
- """Returns the autocorrelation of signal s at all lags.
+ r"""Returns the autocorrelation of signal s at all lags.
Parameters
----------
@@ -971,14 +1156,12 @@ def autocorr(x, **kwargs):
def fftconvolve(in1, in2, mode="full", axis=None):
""" Convolve two N-dimensional arrays using FFT. See convolve.
- This is a fix of scipy.signal.fftconvolve, adding an axis argument and
- importing locally the stuff only needed for this function
-
+ This is a fix of scipy.signal.fftconvolve, adding an axis argument.
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
- complex_result = (np.issubdtype(in1.dtype, np.complex) or
- np.issubdtype(in2.dtype, np.complex))
+ complex_result = (np.issubdtype(in1.dtype, np.complex128) or
+ np.issubdtype(in2.dtype, np.complex128))
if axis is None:
size = s1 + s2 - 1
@@ -1009,7 +1192,7 @@ def fftconvolve(in1, in2, mode="full", axis=None):
if mode == "full":
return ret
elif mode == "same":
- if np.product(s1, axis=0) > np.product(s2, axis=0):
+ if np.prod(s1, axis=0) > np.prod(s2, axis=0):
osize = s1
else:
osize = s2
@@ -1022,15 +1205,15 @@ def fftconvolve(in1, in2, mode="full", axis=None):
# 'get' utils
#-----------------------------------------------------------------------------
def get_freqs(Fs, n):
- """Returns the center frequencies of the frequency decomposotion of a time
+ """Returns the center frequencies of the frequency decomposition of a time
series of length n, sampled at Fs Hz"""
- return np.linspace(0, float(Fs) / 2, float(n) / 2 + 1)
+ return np.linspace(0, Fs / 2, int(n / 2 + 1))
def circle_to_hz(omega, Fsamp):
"""For a frequency grid spaced on the unit circle of an imaginary plane,
- return the corresponding freqency grid in Hz.
+ return the corresponding frequency grid in Hz.
"""
return Fsamp * omega / (2 * np.pi)
@@ -1853,8 +2036,8 @@ def zscore(time_series, axis=-1):
st = time_series.std(axis=axis)
sl = [slice(None)] * len(time_series.shape)
sl[axis] = np.newaxis
- zt = time_series - et[sl]
- zt /= st[sl]
+ zt = time_series - et[tuple(sl)]
+ zt /= st[tuple(sl)]
return zt
@@ -1907,9 +2090,9 @@ def fir_design_matrix(events, len_hrf):
----------
events : 1-d int array
- Integers denoting different kinds of events, occuring at the time
+ Integers denoting different kinds of events, occurring at the time
corresponding to the bin represented by each slot in the array. In
- time-bins in which no event occured, a 0 should be entered. If negative
+ time-bins in which no event occurred, a 0 should be entered. If negative
event values are entered, they will be used as "negative" events, as in
events that should be contrasted with the postitive events (typically -1
and 1 can be used for a simple contrast of two conditions)
@@ -1930,57 +2113,24 @@ def fir_design_matrix(events, len_hrf):
fir_matrix = np.zeros((events.shape[0], len_hrf * event_types.shape[0]))
for t in event_types:
- idx_h_a = np.where(event_types == t)[0] * len_hrf
+ idx_h_a = (np.array(np.where(event_types == t)[0]) * len_hrf)[0]
idx_h_b = idx_h_a + len_hrf
idx_v = np.where(events == t)[0]
for idx_v_a in idx_v:
idx_v_b = idx_v_a + len_hrf
fir_matrix[idx_v_a:idx_v_b, idx_h_a:idx_h_b] += (np.eye(len_hrf) *
- np.sign(t))
+ np.sign(t))
return fir_matrix
-#We carry around a copy of the hilbert transform analytic signal from newer
-#versions of scipy, in case someone is using an older version of scipy with a
-#borked hilbert:
-def hilbert_from_new_scipy(x, N=None, axis=-1):
- """This is a verbatim copy of scipy.signal.hilbert from scipy version
- 0.8dev, which we carry around in order to use in case the version of scipy
- installed is old enough to have a broken implementation of hilbert """
-
- x = np.asarray(x)
- if N is None:
- N = x.shape[axis]
- if N <= 0:
- raise ValueError("N must be positive.")
- if np.iscomplexobj(x):
- print("Warning: imaginary part of x ignored.")
- x = np.real(x)
- Xf = fftpack.fft(x, N, axis=axis)
- h = np.zeros(N)
- if N % 2 == 0:
- h[0] = h[N / 2] = 1
- h[1:N / 2] = 2
- else:
- h[0] = 1
- h[1:(N + 1) / 2] = 2
-
- if len(x.shape) > 1:
- ind = [np.newaxis] * x.ndim
- ind[axis] = slice(None)
- h = h[ind]
- x = fftpack.ifft(Xf * h, axis=axis)
- return x
-
-
#---------- MAR utilities ----------------------------------------
# These utilities are used in the computation of multivariate autoregressive
# models (used in computing Granger causality):
def crosscov_vector(x, y, nlags=None):
- """
+ r"""
This method computes the following function
.. math::
@@ -2033,7 +2183,7 @@ def crosscov_vector(x, y, nlags=None):
def autocov_vector(x, nlags=None):
- """
+ r"""
This method computes the following function
.. math::
@@ -2164,14 +2314,14 @@ def akaike_information_criterion(ecov, p, m, Ntotal, corrected=False):
AIC = (2 * (np.log(linalg.det(ecov))) +
((2 * (p ** 2) * m) / (Ntotal)))
- if corrected is None:
- return AIC
- else:
+ if corrected:
return AIC + (2 * m * (m + 1)) / (Ntotal - m - 1)
+ else:
+ return AIC
def bayesian_information_criterion(ecov, p, m, Ntotal):
- """The Bayesian Information Criterion, also known as the Schwarz criterion
+ r"""The Bayesian Information Criterion, also known as the Schwarz criterion
is a measure of goodness of fit of a statistical model, based on the
number of model parameters and the likelihood of the model
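
With `dpss_windows` and `tapered_spectra` now living in utils.py, `detect_lines` no longer needs the local import from `nitime.algorithms`. A hedged sketch of how the two functions combine, following the docstrings added above (the parameter values are illustrative only):

    import numpy as np
    from nitime import utils

    N, NW, K = 512, 4, 8
    # DPSS tapers (shape (K, N)) and their spectral concentrations (shape (K,))
    tapers, eigvals = utils.dpss_windows(N, NW, K)

    # multitaper spectra of a single 1-D sequence; the result has shape (K, NFFT)
    sig = np.random.randn(N)
    spectra = utils.tapered_spectra(sig, tapers, NFFT=1024)
    # passing (NW, K) instead of precomputed tapers would also return eigvals
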
diff --git a/nitime/version.py b/nitime/version.py
deleted file mode 100644
index e429fb4..0000000
--- a/nitime/version.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""nitime version/release information"""
-from nitime.six.moves import map
-
-# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
-_version_major = 0
-_version_minor = 5
-_version_micro = '' # use '' for first of series, number for 1 and above
-_version_extra = ''
-#_version_extra = '' # Uncomment this for full releases
-
-# Construct full version string from these.
-_ver = [_version_major, _version_minor]
-if _version_micro:
- _ver.append(_version_micro)
-if _version_extra:
- _ver.append(_version_extra)
-
-__version__ = '.'.join(map(str, _ver))
-
-CLASSIFIERS = ["Development Status :: 3 - Alpha",
- "Environment :: Console",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: BSD License",
- "Operating System :: OS Independent",
- "Programming Language :: Python",
- "Topic :: Scientific/Engineering"]
-
-description = "Nitime: timeseries analysis for neuroscience data"
-
-# Note: this long_description is actually a copy/paste from the top-level
-# README.txt, so that it shows up nicely on PyPI. So please remember to edit
-# it only in one place and sync it correctly.
-long_description = """
-===================================================
- Nitime: timeseries analysis for neuroscience data
-===================================================
-
-Nitime is library of tools and algorithms for the analysis of time-series data
-from neuroscience experiments. It contains a implementation of numerical
-algorithms for time-series analysis both in the time and spectral domains, a
-set of container objects to represent time-series, and auxiliary objects that
-expose a high level interface to the numerical machinery and make common
-analysis tasks easy to express with compact and semantically clear code.
-
-Website and mailing list
-========================
-
-Current information can always be found at the nitime `website`_. Questions and
-comments can be directed to the mailing `list`_.
-
-.. _website: http://nipy.org/nitime
-.. _list: http://mail.scipy.org/mailman/listinfo/nipy-devel
-
-Code
-====
-
-You can find our sources and single-click downloads:
-
-* `Main repository`_ on Github.
-* Documentation_ for all releases and current development tree.
-* Download as a tar/zip file the `current trunk`_.
-* Downloads of all `available releases`_.
-
-.. _main repository: http://github.com/nipy/nitime
-.. _Documentation: http://nipy.org/nitime
-.. _current trunk: http://github.com/nipy/nitime/archives/master
-.. _available releases: http://github.com/nipy/nitime/downloads
-
-
-License information
-===================
-
-Nitime is licensed under the terms of the new BSD license. See the file
-"LICENSE" for information on the history of this software, terms & conditions
-for usage, and a DISCLAIMER OF ALL WARRANTIES.
-
-All trademarks referenced herein are property of their respective holders.
-
-Copyright (c) 2006-2012, NIPY Developers
-All rights reserved.
-"""
-
-NAME = "nitime"
-MAINTAINER = "Nipy Developers"
-MAINTAINER_EMAIL = "nipy-devel@neuroimaging.scipy.org"
-DESCRIPTION = description
-LONG_DESCRIPTION = long_description
-URL = "http://nipy.org/nitime"
-DOWNLOAD_URL = "http://github.com/nipy/nitime/downloads"
-LICENSE = "Simplified BSD"
-AUTHOR = "Nitime developers"
-AUTHOR_EMAIL = "nipy-devel@neuroimaging.scipy.org"
-PLATFORMS = "OS Independent"
-MAJOR = _version_major
-MINOR = _version_minor
-MICRO = _version_micro
-VERSION = __version__
-PACKAGES = ['nitime',
- 'nitime.tests',
- 'nitime.fmri',
- 'nitime.fmri.tests',
- 'nitime.algorithms',
- 'nitime.algorithms.tests',
- 'nitime.analysis',
- 'nitime.analysis.tests',
- ]
-PACKAGE_DATA = {"nitime": ["LICENSE", "tests/*.txt", "tests/*.npy",
- "data/*.nii.gz","data/*.txt", "data/*.csv"]}
-REQUIRES = ["numpy", "matplotlib", "scipy"]
diff --git a/nitime/viz.py b/nitime/viz.py
index e86de2d..8072546 100644
--- a/nitime/viz.py
+++ b/nitime/viz.py
@@ -7,20 +7,15 @@ from __future__ import print_function
# If you are running nosetests right now, you might want to use 'agg' as a backend:
import sys
-from nitime.six.moves import map
-from nitime.six.moves import zip
-if "nose" in sys.modules:
- import matplotlib
- matplotlib.use('agg')
-
+
# Then do all the rest of it:
import numpy as np
from scipy import fftpack
-from matplotlib import mpl
+import matplotlib as mpl
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors as colors
-from mpl_toolkits.axes_grid import make_axes_locatable
+from mpl_toolkits.axes_grid1 import make_axes_locatable
from nitime import timeseries as ts
import nitime.utils as tsu
@@ -34,8 +29,8 @@ if matplotlib.__version__[:3] == '1.3' or matplotlib.__version__[:3] == '1.4':
import nitime._mpl_units as mpl_units
import matplotlib.axis as ax
ax.munits = mpl_units
-
-from nitime.utils import triu_indices
+
+from nitime.utils import tril_indices
#Some visualization functions require networkx. Import that if possible:
try:
@@ -267,23 +262,25 @@ def drawmatrix_channels(in_m, channel_names=None, fig=None, x_tick_rot=0,
ax_im = fig.add_subplot(1, 1, 1)
- #If you want to draw the colorbar:
+ # If you want to draw the colorbar:
if colorbar:
divider = make_axes_locatable(ax_im)
ax_cb = divider.new_vertical(size="10%", pad=0.1, pack_start=True)
fig.add_axes(ax_cb)
- #Make a copy of the input, so that you don't make changes to the original
- #data provided
+ # Make a copy of the input, so that you don't make changes to the original
+ # data provided
m = in_m.copy()
- #Null the upper triangle, so that you don't get the redundant and the
- #diagonal values:
- idx_null = triu_indices(m.shape[0])
+ # Null the **lower** triangle, so that you don't get the redundant and the
+ # diagonal values:
+ idx_null = tril_indices(m.shape[0])
m[idx_null] = np.nan
+    # transpose the upper triangle to lower
+ m = m.T
- #Extract the minimum and maximum values for scaling of the
- #colormap/colorbar:
+ # Extract the minimum and maximum values for scaling of the
+ # colormap/colorbar:
max_val = np.nanmax(m)
min_val = np.nanmin(m)
@@ -298,14 +295,14 @@ def drawmatrix_channels(in_m, channel_names=None, fig=None, x_tick_rot=0,
color_min = color_anchor[0]
color_max = color_anchor[1]
- #The call to imshow produces the matrix plot:
+ # The call to imshow produces the matrix plot:
im = ax_im.imshow(m, origin='upper', interpolation='nearest',
- vmin=color_min, vmax=color_max, cmap=cmap)
+ vmin=color_min, vmax=color_max, cmap=cmap)
- #Formatting:
+ # Formatting:
ax = ax_im
ax.grid(True)
- #Label each of the cells with the row and the column:
+ # Label each of the cells with the row and the column:
if channel_names is not None:
for i in range(0, m.shape[0]):
if i < (m.shape[0] - 1):
@@ -323,7 +320,7 @@ def drawmatrix_channels(in_m, channel_names=None, fig=None, x_tick_rot=0,
ax.set_ybound([-0.5, N - 0.5])
ax.set_xbound([-0.5, N - 1.5])
- #Make the tick-marks invisible:
+ # Make the tick-marks invisible:
for line in ax.xaxis.get_ticklines():
line.set_markeredgewidth(0)
@@ -335,17 +332,17 @@ def drawmatrix_channels(in_m, channel_names=None, fig=None, x_tick_rot=0,
if title is not None:
ax.set_title(title)
- #The following produces the colorbar and sets the ticks
+ # The following produces the colorbar and sets the ticks
if colorbar:
- #Set the ticks - if 0 is in the interval of values, set that, as well
- #as the maximal and minimal values:
+ # Set the ticks - if 0 is in the interval of values, set that, as well
+ # as the maximal and minimal values:
if min_val < 0:
ticks = [color_min, min_val, 0, max_val, color_max]
- #Otherwise - only set the minimal and maximal value:
+ # Otherwise - only set the minimal and maximal value:
else:
ticks = [color_min, min_val, max_val, color_max]
- #This makes the colorbar:
+ # This makes the colorbar:
cb = fig.colorbar(im, cax=ax_cb, orientation='horizontal',
cmap=cmap,
norm=im.norm,
@@ -383,7 +380,8 @@ def drawgraph_channels(in_m, channel_names=None, cmap=plt.cm.RdBu_r,
node_colors: defaults to white,
- title:
+ title: str
+ Sets a title for the figure.
layout, defaults to nx.circular_layout
Returns
@@ -408,16 +406,15 @@ def drawgraph_channels(in_m, channel_names=None, cmap=plt.cm.RdBu_r,
if node_colors is None:
node_colors = ['w'] * nnodes
- #Make a copy, avoiding making changes to the original data:
+ # Make a copy, avoiding making changes to the original data:
m = in_m.copy()
- #Set the diagonal values to the minimal value of the matrix, so that the
- #vrange doesn't always get stretched to 1:
+ # Set the diagonal values to the minimal value of the matrix, so that the
+ # vrange doesn't always get stretched to 1:
m[np.arange(nnodes), np.arange(nnodes)] = min(np.nanmin(m), -np.nanmax(m))
range_setter = max(abs(np.nanmin(m)), abs(np.nanmax(m)))
vrange = [-range_setter, range_setter]
- #m[np.where(np.isnan(m))] = 0
if threshold is None:
#If there happens to be an off-diagnoal edge in the adjacency matrix
#which is just as small as the minimum, we don't want to drop that one:
@@ -683,7 +680,7 @@ def draw_graph(G,
# Build a 'weighted degree' array obtained by adding the (absolute value)
# of the weights for all edges pointing to each node:
- amat = nx.adj_matrix(G).A # get a normal array out of it
+ amat = nx.to_numpy_array(G) # get a normal array out of it
degarr = abs(amat).sum(0) # weights are sums across rows
# Map the degree to the 0-1 range so we can use it for sizing the nodes.
@@ -722,7 +719,6 @@ def draw_graph(G,
# e[2] is edge value: edges_iter returns (i,j,data)
gvals = np.array([e[2]['weight'] for e in G.edges(data=True)])
gvmin, gvmax = gvals.min(), gvals.max()
-
gvrange = gvmax - gvmin
if vrange is None:
vrange = gvmin, gvmax
@@ -753,7 +749,8 @@ def draw_graph(G,
nodelist=[nod],
node_color=node_colors[nod],
node_shape=node_shapes[nod],
- node_size=node_sizes[nod])
+ node_size=node_sizes[nod],
+ edgecolors='k')
# Draw edges
if not isinstance(G, nx.DiGraph):
# Undirected graph, simple lines for edges
@@ -776,12 +773,12 @@ def draw_graph(G,
edge_color = [tuple(edge_cmap(ecol, fade))]
#dbg:
#print u,v,y
- draw_networkx_edges(G,
- pos,
- edgelist=[(u, v)],
- width=min_width + alpha * max_width,
- edge_color=edge_color,
- style=edge_style)
+ nx.draw_networkx_edges(G,
+ pos,
+ edgelist=[(u, v)],
+ width=min_width + alpha * max_width,
+ edge_color=edge_color,
+ style=edge_style)
else:
# Directed graph, use arrows.
# XXX - this is currently broken.
@@ -838,204 +835,13 @@ def lab2node(labels, labels_dict):
return [labels_dict[ll] for ll in labels]
-### Patched version for networx draw_networkx_edges, sent to Aric.
-def draw_networkx_edges(G, pos,
- edgelist=None,
- width=1.0,
- edge_color='k',
- style='solid',
- alpha=None,
- edge_cmap=None,
- edge_vmin=None,
- edge_vmax=None,
- ax=None,
- arrows=True,
- **kwds):
- """Draw the edges of the graph G
-
- This draws only the edges of the graph G.
-
- pos is a dictionary keyed by vertex with a two-tuple
- of x-y positions as the value.
- See networkx.layout for functions that compute node positions.
-
- edgelist is an optional list of the edges in G to be drawn.
- If provided, only the edges in edgelist will be drawn.
-
- edgecolor can be a list of matplotlib color letters such as 'k' or
- 'b' that lists the color of each edge; the list must be ordered in
- the same way as the edge list. Alternatively, this list can contain
- numbers and those number are mapped to a color scale using the color
- map edge_cmap. Finally, it can also be a list of (r,g,b) or (r,g,b,a)
- tuples, in which case these will be used directly to color the edges. If
- the latter mode is used, you should not provide a value for alpha, as it
- would be applied globally to all lines.
-
- For directed graphs, 'arrows' (actually just thicker stubs) are drawn
- at the head end. Arrows can be turned off with keyword arrows=False.
-
- See draw_networkx for the list of other optional parameters.
-
- """
- try:
- import matplotlib.pylab as pylab
- import matplotlib.cbook as cb
- from matplotlib.colors import colorConverter, Colormap
- from matplotlib.collections import LineCollection
- except ImportError:
- raise ImportError("Matplotlib required for draw()")
- except RuntimeError:
- pass # unable to open display
-
- if ax is None:
- ax = pylab.gca()
-
- if edgelist is None:
- edgelist = G.edges()
-
- if not edgelist or len(edgelist) == 0: # no edges!
- return None
-
- # set edge positions
- edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
-
- if not cb.iterable(width):
- lw = (width,)
- else:
- lw = width
-
- if not cb.is_string_like(edge_color) \
- and cb.iterable(edge_color) \
- and len(edge_color) == len(edge_pos):
- if np.alltrue([cb.is_string_like(c)
- for c in edge_color]):
- # (should check ALL elements)
- # list of color letters such as ['k','r','k',...]
- edge_colors = tuple([colorConverter.to_rgba(c, alpha)
- for c in edge_color])
- elif np.alltrue([not cb.is_string_like(c)
- for c in edge_color]):
- # If color specs are given as (rgb) or (rgba) tuples, we're OK
- if np.alltrue([cb.iterable(c) and len(c) in (3, 4)
- for c in edge_color]):
- edge_colors = tuple(edge_color)
- alpha = None
- else:
- # numbers (which are going to be mapped with a colormap)
- edge_colors = None
- else:
- e_s = 'edge_color must consist of either color names or numbers'
- raise ValueError(e_s)
- else:
- if len(edge_color) == 1:
- edge_colors = (colorConverter.to_rgba(edge_color, alpha),)
- else:
- e_s = 'edge_color must be a single color or list of exactly'
- e_s += 'm colors where m is the number or edges'
- raise ValueError(e_s)
- edge_collection = LineCollection(edge_pos,
- colors=edge_colors,
- linewidths=lw,
- antialiaseds=(1,),
- linestyle=style,
- transOffset=ax.transData,
- )
-
- # Note: there was a bug in mpl regarding the handling of alpha values for
- # each line in a LineCollection. It was fixed in matplotlib in r7184 and
- # r7189 (June 6 2009). We should then not set the alpha value globally,
- # since the user can instead provide per-edge alphas now. Only set it
- # globally if provided as a scalar.
- if cb.is_numlike(alpha):
- edge_collection.set_alpha(alpha)
-
- # need 0.87.7 or greater for edge colormaps
- if edge_colors is None:
- if edge_cmap is not None:
- assert(isinstance(edge_cmap, Colormap))
- edge_collection.set_array(np.asarray(edge_color))
- edge_collection.set_cmap(edge_cmap)
- if edge_vmin is not None or edge_vmax is not None:
- edge_collection.set_clim(edge_vmin, edge_vmax)
- else:
- edge_collection.autoscale()
- pylab.sci(edge_collection)
-
-# else:
-# sys.stderr.write(\
-# """matplotlib version >= 0.87.7 required for colormapped edges.
-# (version %s detected)."""%matplotlib.__version__)
-# raise UserWarning(\
-# """matplotlib version >= 0.87.7 required for colormapped edges.
-# (version %s detected)."""%matplotlib.__version__)
-
- arrow_collection = None
-
- if G.is_directed() and arrows:
-
- # a directed graph hack
- # draw thick line segments at head end of edge
- # waiting for someone else to implement arrows that will work
- arrow_colors = (colorConverter.to_rgba('k', alpha),)
- a_pos = []
- p = 1.0 - 0.25 # make head segment 25 percent of edge length
- for src, dst in edge_pos:
- x1, y1 = src
- x2, y2 = dst
- dx = x2 - x1 # x offset
- dy = y2 - y1 # y offset
- d = np.sqrt(float(dx ** 2 + dy ** 2)) # length of edge
- if d == 0: # source and target at same position
- continue
- if dx == 0: # vertical edge
- xa = x2
- ya = dy * p + y1
- if dy == 0: # horizontal edge
- ya = y2
- xa = dx * p + x1
- else:
- theta = np.arctan2(dy, dx)
- xa = p * d * np.cos(theta) + x1
- ya = p * d * np.sin(theta) + y1
-
- a_pos.append(((xa, ya), (x2, y2)))
-
- arrow_collection = LineCollection(a_pos,
- colors=arrow_colors,
- linewidths=[4 * ww for ww in lw],
- antialiaseds=(1,),
- transOffset=ax.transData,
- )
-
- # update view
- minx = np.amin(np.ravel(edge_pos[:, :, 0]))
- maxx = np.amax(np.ravel(edge_pos[:, :, 0]))
- miny = np.amin(np.ravel(edge_pos[:, :, 1]))
- maxy = np.amax(np.ravel(edge_pos[:, :, 1]))
-
- w = maxx - minx
- h = maxy - miny
- padx, pady = 0.05 * w, 0.05 * h
- corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady)
- ax.update_datalim(corners)
- ax.autoscale_view()
-
- edge_collection.set_zorder(1) # edges go behind nodes
- ax.add_collection(edge_collection)
- if arrow_collection:
- arrow_collection.set_zorder(1) # edges go behind nodes
- ax.add_collection(arrow_collection)
-
- return ax
-
-
def mkgraph(cmat, threshold=0.0, threshold2=None):
"""Make a weighted graph object out of an adjacency matrix.
The values in the original matrix cmat can be thresholded out. If only one
threshold is given, all values below that are omitted when creating edges.
If two thresholds are given, then values in the th2-th1 range are
- ommitted. This allows for the easy creation of weighted graphs with
+ omitted. This allows for the easy creation of weighted graphs with
positive and negative values where a range of weights around 0 is omitted.
Parameters
@@ -1425,3 +1231,133 @@ def plot_spectral_estimate(f, sdf, sdf_ests, limits=None, elabels=()):
ax.set_ylim(ax_limits)
ax.legend()
return fig
+
+
+# Patch in a fix to networkx's draw_networkx_nodes
+# This function is broken in version 1.11, and the fix is a one-line change:
+# the addition of `**kwds` to the call to `ax.scatter` below.
+# Without this addition, `scatter` defaults to plotting the points with the edge
+# color set to be the same as the face color, which caused our issue #153
+
+def draw_networkx_nodes(G, pos,
+ nodelist=None,
+ node_size=300,
+ node_color='r',
+ node_shape='o',
+ alpha=1.0,
+ cmap=None,
+ vmin=None,
+ vmax=None,
+ ax=None,
+ linewidths=None,
+ label=None,
+ **kwds):
+ """Draw the nodes of the graph G.
+
+ This draws only the nodes of the graph G.
+
+ Parameters
+ ----------
+ G : graph
+ A networkx graph
+
+ pos : dictionary
+ A dictionary with nodes as keys and positions as values.
+ Positions should be sequences of length 2.
+
+ ax : Matplotlib Axes object, optional
+ Draw the graph in the specified Matplotlib axes.
+
+ nodelist : list, optional
+ Draw only specified nodes (default G.nodes())
+
+ node_size : scalar or array
+ Size of nodes (default=300). If an array is specified it must be the
+ same length as nodelist.
+
+ node_color : color string, or array of floats
+ Node color. Can be a single color format string (default='r'),
+ or a sequence of colors with the same length as nodelist.
+ If numeric values are specified they will be mapped to
+ colors using the cmap and vmin,vmax parameters. See
+ matplotlib.scatter for more details.
+
+ node_shape : string
+ The shape of the node. Specification is as matplotlib.scatter
+ marker, one of 'so^>v<dph8' (default='o').
+
+ alpha : float
+ The node transparency (default=1.0)
+
+ cmap : Matplotlib colormap
+ Colormap for mapping intensities of nodes (default=None)
+
+ vmin,vmax : floats
+ Minimum and maximum for node colormap scaling (default=None)
+
+ linewidths : [None | scalar | sequence]
+ Line width of symbol border (default =1.0)
+
+ label : [None| string]
+ Label for legend
+
+ Returns
+ -------
+ matplotlib.collections.PathCollection
+ `PathCollection` of the nodes.
+
+ Examples
+ --------
+ >>> G=nx.dodecahedral_graph()
+ >>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
+
+ Also see the NetworkX drawing examples at
+ http://networkx.github.io/documentation/latest/gallery.html
+
+ See Also
+ --------
+ draw()
+ draw_networkx()
+ draw_networkx_edges()
+ draw_networkx_labels()
+ draw_networkx_edge_labels()
+ """
+ try:
+ import matplotlib.pyplot as plt
+ import numpy
+ except ImportError:
+ raise ImportError("Matplotlib required for draw()")
+ except RuntimeError:
+ print("Matplotlib unable to open display")
+ raise
+
+ if ax is None:
+ ax = plt.gca()
+
+ if nodelist is None:
+ nodelist = G.nodes()
+
+ if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
+ return None
+
+ try:
+ xy = numpy.asarray([pos[v] for v in nodelist])
+ except KeyError as e:
+ raise nx.NetworkXError('Node %s has no position.'%e)
+ except ValueError:
+ raise nx.NetworkXError('Bad value in node positions.')
+
+ node_collection = ax.scatter(xy[:, 0], xy[:, 1],
+ s=node_size,
+ c=node_color,
+ marker=node_shape,
+ cmap=cmap,
+ vmin=vmin,
+ vmax=vmax,
+ alpha=alpha,
+ linewidths=linewidths,
+ label=label,
+ **kwds)
+
+ node_collection.set_zorder(2)
+ return node_collection
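
Elsewhere in viz.py, `draw_graph` switches from the deprecated `nx.adj_matrix(G).A` to `nx.to_numpy_array(G)` for the weighted-degree computation. A small sketch of the replacement call, with a toy graph standing in for the correlation graphs nitime builds (assumes networkx is importable):

    import networkx as nx
    import numpy as np

    G = nx.Graph()
    G.add_edge('a', 'b', weight=0.5)
    G.add_edge('b', 'c', weight=2.0)

    # dense weighted adjacency matrix, replacing nx.adj_matrix(G).A
    amat = nx.to_numpy_array(G)
    degarr = np.abs(amat).sum(0)  # weighted degree per node, as in draw_graph
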
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..3225e61
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,63 @@
+[build-system]
+requires = [
+ "setuptools",
+ "setuptools_scm[toml]>=6.2",
+ # As of numpy 1.25, you can now build against older APIs.
+ # https://numpy.org/doc/stable/release/1.25.0-notes.html
+ "numpy>=1.25; python_version > '3.8'",
+ # NEP29-minimum as of Aug 17, 2023 (1.25 doesn't support 3.8)
+ "numpy==1.22; python_version == '3.8'",
+]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "nitime"
+dynamic = ["version"]
+description = "Nitime: timeseries analysis for neuroscience data"
+readme = "README.txt"
+license = { file = "LICENSE" }
+requires-python = ">=3.8"
+authors = [
+ { name = "Nitime developers", email = "neuroimaging@python.org" },
+]
+maintainers = [
+ { name = "Nipy Developers", email = "neuroimaging@python.org" },
+]
+classifiers = [
+ "Development Status :: 3 - Alpha",
+ "Environment :: Console",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: BSD License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Topic :: Scientific/Engineering",
+]
+dependencies = [
+ "matplotlib>=3.5",
+ "numpy>=1.22",
+ "scipy>=1.8",
+]
+
+[project.optional-dependencies]
+full = [
+ "networkx>=2.7",
+ "nibabel>=4.0",
+]
+
+[project.urls]
+Download = "http://github.com/nipy/nitime/downloads"
+Homepage = "http://nipy.org/nitime"
+
+[tool.setuptools.packages.find]
+include = ["nitime*"]
+
+[tool.setuptools_scm]
+write_to = "nitime/_version.py"
+
+[tool.cibuildwheel]
+# Disable PyPy
+skip = "pp*"
+
+# 64-bit builds only; 32-bit builds seem pretty niche these days, so
+# don't bother unless someone asks
+archs = ["auto64"]
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..afb33a3
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,6 @@
+sphinx
+pytest
+pytest-cov
+nibabel
+networkx
+tomli; python_version < '3.11'
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..3b56825
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+# Auto-generated by tools/update_requirements.py
+--only-binary numpy,scipy
+--extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple
+matplotlib>=3.5
+numpy>=1.22
+scipy>=1.8
+networkx>=2.7
+nibabel>=4.0
diff --git a/setup.py b/setup.py
index 15d44f7..abb8a18 100755
--- a/setup.py
+++ b/setup.py
@@ -1,62 +1,29 @@
-#!/usr/bin/env python
-"""Setup file for the Python nitime package."""
+#!/usr/bin/env python3
+"""Setup file for the Python nitime package.
-import os
-import sys
-
-# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
-# update it when the contents of directories change.
-if os.path.exists('MANIFEST'):
- os.remove('MANIFEST')
-
-from distutils.core import setup
-
-# Get version and release info, which is all stored in nitime/version.py
-ver_file = os.path.join('nitime', 'version.py')
-with open(ver_file) as f:
- exec(f.read())
-
-opts = dict(name=NAME,
- maintainer=MAINTAINER,
- maintainer_email=MAINTAINER_EMAIL,
- description=DESCRIPTION,
- long_description=LONG_DESCRIPTION,
- url=URL,
- download_url=DOWNLOAD_URL,
- license=LICENSE,
- classifiers=CLASSIFIERS,
- author=AUTHOR,
- author_email=AUTHOR_EMAIL,
- platforms=PLATFORMS,
- version=VERSION,
- packages=PACKAGES,
- package_data=PACKAGE_DATA,
- requires=REQUIRES,
- )
+This file contains only the Cython components.
+See pyproject.toml for the remaining configuration.
+"""
+from setuptools import setup
try:
- from distutils.extension import Extension
- from Cython.Distutils import build_ext as build_pyx_ext
+ from setuptools import Extension
+ from Cython.Build import cythonize
from numpy import get_include
+
# add Cython extensions to the setup options
- exts = [ Extension('nitime._utils', ['nitime/_utils.pyx'],
- include_dirs=[get_include()]) ]
- opts['cmdclass'] = dict(build_ext=build_pyx_ext)
- opts['ext_modules'] = exts
+ exts = [
+ Extension(
+ 'nitime._utils',
+ ['nitime/_utils.pyx'],
+ include_dirs=[get_include()],
+ define_macros=[('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')],
+ )
+ ]
+ opts = {'ext_modules': cythonize(exts, language_level='3')}
except ImportError:
# no loop for you!
- pass
-
-# For some commands, use setuptools. Note that we do NOT list install here!
-# If you want a setuptools-enhanced install, just run 'setupegg.py install'
-needs_setuptools = set(('develop', ))
-if len(needs_setuptools.intersection(sys.argv)) > 0:
- import setuptools
-
-# Only add setuptools-specific flags if the user called for setuptools, but
-# otherwise leave it alone
-if 'setuptools' in sys.modules:
- opts['zip_safe'] = False
+ opts = {}
# Now call the actual setup function
if __name__ == '__main__':
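
The rewritten setup.py keeps only the optional Cython build: when Cython and numpy are importable, nitime._utils is compiled from the .pyx source; otherwise setup() proceeds with no extension modules. A hedged way to check which case applied after installation (it assumes nitime is importable; the package's own fallback behaviour is not shown in this diff)::

    import importlib.util

    spec = importlib.util.find_spec("nitime._utils")
    if spec is not None and spec.origin and not spec.origin.endswith(".py"):
        print("compiled extension in use:", spec.origin)
    else:
        print("no compiled extension was built")
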
diff --git a/setup_egg.py b/setup_egg.py
deleted file mode 100644
index 6d5d631..0000000
--- a/setup_egg.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from setuptools import setup
-execfile('setup.py')
diff --git a/tools/apigen.py b/tools/apigen.py
index afce9da..e72cebe 100644
--- a/tools/apigen.py
+++ b/tools/apigen.py
@@ -168,7 +168,7 @@ class ApiDocWriter(object):
functions, classes = self._parse_lines(f)
f.close()
return functions, classes
-
+
def _parse_lines(self, linesource):
''' Parse lines of text for functions and classes '''
functions = []
@@ -206,18 +206,18 @@ class ApiDocWriter(object):
# get the names of all classes and functions
functions, classes = self._parse_module(uri)
if not len(functions) and not len(classes):
- print 'WARNING: Empty -',uri # dbg
+ print('WARNING: Empty -',uri) # dbg
return ''
# Make a shorter version of the uri that omits the package name for
- # titles
+ # titles
uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
-
+
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
chap_title = uri_short
- ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title)
- + '\n\n')
+ ad += (chap_title+'\n' + self.rst_section_levels[1] *
+ len(chap_title) + '\n\n')
# Set the chapter title to read 'module' for all modules except for the
# main packages
@@ -290,7 +290,7 @@ class ApiDocWriter(object):
elif match_type == 'package':
patterns = self.package_skip_patterns
else:
- raise ValueError('Cannot interpret match type "%s"'
+ raise ValueError('Cannot interpret match type "%s"'
% match_type)
# Match to URI without package name
L = len(self.package_name)
@@ -306,7 +306,7 @@ class ApiDocWriter(object):
return True
def discover_modules(self):
- ''' Return module sequence discovered from ``self.package_name``
+ ''' Return module sequence discovered from ``self.package_name``
Parameters
@@ -327,7 +327,7 @@ class ApiDocWriter(object):
>>> dw.package_skip_patterns.append('\.util$')
>>> 'sphinx.util' in dw.discover_modules()
False
- >>>
+ >>>
'''
modules = [self.package_name]
# raw directory parsing
@@ -350,7 +350,7 @@ class ApiDocWriter(object):
self._survives_exclude(module_uri, 'module')):
modules.append(module_uri)
return sorted(modules)
-
+
def write_modules_api(self, modules,outdir):
# write the list
written_modules = []
@@ -375,7 +375,7 @@ class ApiDocWriter(object):
outdir : string
Directory name in which to store files
We create automatic filenames for each module
-
+
Returns
-------
None
@@ -389,7 +389,7 @@ class ApiDocWriter(object):
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules,outdir)
-
+
def write_index(self, outdir, froot='gen', relative_to=None):
"""Make a reST API index file from written files
diff --git a/tools/build_modref_templates.py b/tools/build_modref_templates.py
index be584b6..284fd21 100755
--- a/tools/build_modref_templates.py
+++ b/tools/build_modref_templates.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Script to auto-generate our API docs.
"""
# stdlib imports
@@ -16,4 +16,4 @@ if __name__ == '__main__':
]
docwriter.write_api_docs(outdir)
docwriter.write_index(outdir, 'gen', relative_to='api')
- print '%d files written' % len(docwriter.written_modules)
+ print('%d files written' % len(docwriter.written_modules))
diff --git a/tools/build_release b/tools/build_release
index 00c16ab..2bfd958 100755
--- a/tools/build_release
+++ b/tools/build_release
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Nitime release build script.
"""
from toollib import *
diff --git a/tools/ex2rst b/tools/ex2rst
index 59207a3..8d55dab 100755
--- a/tools/ex2rst
+++ b/tools/ex2rst
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Note: this file is copied (possibly with minor modifications) from the
# sources of the PyMVPA project - http://pymvpa.org. It remains licensed as
@@ -99,7 +99,7 @@ def exfile2rst(filename):
proc_line = None
# handle doc start
if not indocs:
- # guarenteed to start with """
+ # guaranteed to start with """
if len(cleanline) > 3 \
and (cleanline.endswith('"""') \
or cleanline.endswith("'''")):
diff --git a/tools/github_stats.py b/tools/github_stats.py
index c214872..543971a 100755
--- a/tools/github_stats.py
+++ b/tools/github_stats.py
@@ -1,18 +1,19 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Simple tools to query github.com and gather stats about issues.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
-from __future__ import print_function
+
import json
import re
import sys
+import codecs
from datetime import datetime, timedelta
-from urllib import urlopen
+from urllib.request import urlopen
#-----------------------------------------------------------------------------
# Globals
@@ -42,7 +43,8 @@ def get_paged_request(url):
results = []
while url:
print("fetching %s" % url, file=sys.stderr)
- f = urlopen(url)
+ reader = codecs.getreader("utf-8")
+ f = reader(urlopen(url))
results.extend(json.load(f))
links = parse_link_header(f.headers)
url = links.get('next')
@@ -76,7 +78,7 @@ def is_pull_request(issue):
return 'pull_request_url' in issue
-def issues_closed_since(period=timedelta(days=730), project="nipy/nitime", pulls=False):
+def issues_closed_since(period=timedelta(days=365), project="nipy/nitime", pulls=False):
"""Get all issues closed since a particular point in time. period
can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present."""
@@ -122,7 +124,7 @@ if __name__ == "__main__":
if len(sys.argv) > 1:
days = int(sys.argv[1])
else:
- days = 730
+ days = 310
# turn off to play interactively without redownloading, use %run -i
if 1:
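
The urllib change in get_paged_request wraps the binary HTTP response in a UTF-8 StreamReader so that json.load receives text on Python 3. The same decode step in isolation (this hits the live GitHub API and is shown purely as an illustration; the URL and per_page value are examples)::

    import codecs
    import json
    from urllib.request import urlopen

    url = "https://api.github.com/repos/nipy/nitime/issues?state=closed&per_page=5"
    reader = codecs.getreader("utf-8")
    with urlopen(url) as resp:
        data = json.load(reader(resp))   # same wrapping as in get_paged_request
    print(len(data), "issues fetched")
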
diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py
index 000245a..b002f65 100755
--- a/tools/gitwash_dumper.py
+++ b/tools/gitwash_dumper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
''' Checkout gitwash repo into directory and do search replace on name '''
import os
@@ -84,7 +84,7 @@ def copy_replace(replace_pairs,
for rep_glob in rep_globs:
fnames += fnmatch.filter(out_fnames, rep_glob)
if verbose:
- print '\n'.join(fnames)
+ print('\n'.join(fnames))
for fname in fnames:
filename_search_replace(replace_pairs, fname, False)
for in_exp, out_exp in renames:
diff --git a/tools/make_examples.py b/tools/make_examples.py
index 97ce75a..0f1ae05 100755
--- a/tools/make_examples.py
+++ b/tools/make_examples.py
@@ -1,11 +1,9 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Run the py->rst conversion and run all examples.
This also creates the index.rst file appropriately, makes figures, etc.
"""
-#-----------------------------------------------------------------------------
# Library imports
-#-----------------------------------------------------------------------------
# Stdlib imports
import os
@@ -17,17 +15,16 @@ from glob import glob
# We must configure the mpl backend before making any further mpl imports
import matplotlib
-matplotlib.use('Agg')
import matplotlib.pyplot as plt
-
from matplotlib._pylab_helpers import Gcf
# Local tools
from toollib import *
-#-----------------------------------------------------------------------------
-# Globals
-#-----------------------------------------------------------------------------
+# Set MPL backend:
+matplotlib.use('Agg')
+
+# Globals:
examples_header = """
@@ -41,20 +38,22 @@ Examples
.. toctree::
:maxdepth: 2
-
-
+
+
"""
-#-----------------------------------------------------------------------------
-# Function defintions
-#-----------------------------------------------------------------------------
+# Function definitions::
# These global variables let show() be called by the scripts in the usual
# manner, but when generating examples, we override it to write the figures to
# files with a known name (derived from the script name) plus a counter
figure_basename = None
-# We must change the show command to save instead
+# We must change the show command to save instead
def show():
+ """
+ This overrides matplotlib's `show` function to save figures instead of
+ rendering them to the screen.
+ """
allfm = Gcf.get_all_fig_managers()
for fcount, fm in enumerate(allfm):
fm.canvas.figure.savefig('%s_%02i.png' %
@@ -63,9 +62,7 @@ def show():
_mpl_show = plt.show
plt.show = show
-#-----------------------------------------------------------------------------
-# Main script
-#-----------------------------------------------------------------------------
+# Main script::
# Work in examples directory
cd('examples')
@@ -79,9 +76,9 @@ sh('../../tools/ex2rst --project Nitime --outdir . .')
index = open('index.rst', 'w')
index.write(examples_header)
for name in [os.path.splitext(f)[0] for f in glob('*.rst')]:
- #Don't add the index in there to avoid sphinx errors and don't add the
- #note_about examples again (because it was added at the top):
- if name not in(['index','note_about_examples']):
+ # Don't add the index in there to avoid sphinx errors and don't add the
+ # note_about examples again (because it was added at the top):
+ if name not in(['index', 'note_about_examples']):
index.write(' %s\n' % name)
index.close()
# Execute each python script in the directory.
@@ -92,7 +89,9 @@ else:
os.mkdir('fig')
for script in glob('*.py'):
+ print(script)
figure_basename = pjoin('fig', os.path.splitext(script)[0])
- execfile(script)
+
+ with open(script) as f:
+ exec(f.read())
plt.close('all')
-
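
The key trick in make_examples.py is replacing plt.show with a saver, so each example script runs headlessly and writes numbered PNGs instead of opening windows. A self-contained sketch of that pattern (paths and figure names here are illustrative)::

    import os

    import matplotlib
    matplotlib.use('Agg')                      # non-interactive backend
    import matplotlib.pyplot as plt
    from matplotlib._pylab_helpers import Gcf

    os.makedirs('fig', exist_ok=True)
    figure_basename = os.path.join('fig', 'demo')

    def show():
        # Save every open figure to <basename>_NN.png instead of displaying it.
        for fcount, fm in enumerate(Gcf.get_all_fig_managers()):
            fm.canvas.figure.savefig('%s_%02i.png' % (figure_basename, fcount))

    plt.show = show

    plt.plot([0, 1, 2], [0, 1, 4])
    plt.show()                                 # writes fig/demo_00.png
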
diff --git a/tools/release b/tools/release
index 26761a5..6d9d382 100755
--- a/tools/release
+++ b/tools/release
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Nitime release script.
This should only be run at real release time.
diff --git a/tools/sneeze.py b/tools/sneeze.py
deleted file mode 100755
index c9cb6b9..0000000
--- a/tools/sneeze.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-"""Script to run nose with coverage reporting without boilerplate params.
-
-Usage:
- sneeze test_coordinate_system.py
-
-Coverage will be reported on the module extracted from the test file
-name by removing the 'test_' prefix and '.py' suffix. In the above
-example, we'd get the coverage on the coordinate_system module. The
-test file is searched for an import statement containing the module
-name.
-
-The nose command would look like this:
-
-nosetests -sv --with-coverage --cover-package=nipy.core.reference.coordinate_system test_coordinate_system.py
-
-"""
-
-import re
-import os
-import sys
-import nose
-
-test_file = sys.argv[1]
-module = os.path.splitext(test_file)[0] # remove '.py' extension
-module = module.split('test_')[1] # remove 'test_' prefix
-regexp = "[\w\.]+%s"%module
-compexp = re.compile(regexp)
-
-cover_pkg = None
-fp = open(test_file, 'r')
-for line in fp:
- if line.startswith('from') or line.startswith('import'):
- pkg = re.search(regexp, line)
- if pkg:
- cover_pkg = pkg.group()
- break
-fp.close()
-
-if cover_pkg:
- cover_arg = '--cover-package=%s' % cover_pkg
- sys.argv += ['-sv', '--with-coverage', cover_arg]
- # Print out command for user feedback and debugging
- cmd = 'nosetests -sv --with-coverage %s %s' % (cover_arg, test_file)
- print cmd
- print
- nose.run()
-else:
- raise ValueError('Unable to find module %s imported in test file %s'
- % (module, test_file))
diff --git a/tools/update_requirements.py b/tools/update_requirements.py
new file mode 100755
index 0000000..92e9e83
--- /dev/null
+++ b/tools/update_requirements.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+import sys
+from pathlib import Path
+
+try:
+ import tomllib
+except ImportError:
+ import tomli as tomllib
+
+repo_root = Path(__file__).parent.parent
+pyproject_toml = repo_root / 'pyproject.toml'
+reqs = repo_root / 'requirements.txt'
+min_reqs = repo_root / 'min-requirements.txt'
+
+with open(pyproject_toml, 'rb') as fobj:
+ config = tomllib.load(fobj)
+ project = config['project']
+requirements = project['dependencies'] + project['optional-dependencies']['full']
+
+script_name = Path(__file__).relative_to(repo_root)
+
+lines = [
+ f'# Auto-generated by {script_name}',
+ '--only-binary numpy,scipy',
+ '--extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple',
+ '',
+]
+start = len(lines) - 1
+
+# Write requirements
+lines[start:-1] = requirements
+reqs.write_text('\n'.join(lines))
+
+# Write minimum requirements
+lines[start:-1] = [req.replace('>=', '==').replace('~=', '==') for req in requirements]
+min_reqs.write_text('\n'.join(lines))
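
update_requirements.py regenerates both requirement files from the single dependency list in pyproject.toml. The slice assignment swaps the dependency block in between the fixed header and a trailing empty string, which keeps a final newline after '\n'.join. The same trick in isolation (values are illustrative)::

    lines = ['# header', '--only-binary numpy,scipy', '']
    start = len(lines) - 1

    lines[start:-1] = ['numpy>=1.22', 'scipy>=1.8']   # runtime pins
    print('\n'.join(lines))

    lines[start:-1] = ['numpy==1.22', 'scipy==1.8']   # minimum pins replace the same slot
    print('\n'.join(lines))
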