[TST] Tests and better debugging #41

Merged · 12 commits · May 14, 2018
1 change: 0 additions & 1 deletion .gitignore
@@ -1,4 +1,3 @@
data/
docs/generated/

# Byte-compiled / optimized / DLL files
36 changes: 32 additions & 4 deletions .travis.yml
@@ -2,11 +2,39 @@ language: python
sudo: false

python:
- 3.6
- 3.6

matrix:
include:
- python: 3.6
env:
- STYLE=1
- python: 3.6
env:
- COVERAGE=1

before_install:
- python -m pip install --upgrade pip
- pip install "flake8<3.0" flake8-putty
- python -m pip install --upgrade pip
- pip install --upgrade virtualenv
- if [ "${STYLE}" == "1" ]; then
pip install "flake8<3.0" flake8-putty;
fi
- if [ "${COVERAGE}" == "1" ]; then
pip install coverage coveralls codecov pytest pytest-cov;
fi

script:
- flake8 tedana
- |
if [ "${STYLE}" == "1" ]; then
flake8 tedana
else
if [ "${COVERAGE}" == "1" ]; then
TEST_ARGS="--cov-report term-missing --cov=tedana";
fi
py.test --ignore=tedana/tests/test_tedana.py ${TEST_ARGS} tedana;
fi

after_success:
- if [ "${COVERAGE}" == "1" ]; then
coveralls; codecov;
fi
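
The updated Travis configuration splits CI into a style job (`STYLE=1`) and a coverage job (`COVERAGE=1`), with the coverage job passing extra `--cov` arguments to `py.test`. As a rough local equivalent (a sketch only, assuming `pytest` and `pytest-cov` are installed and that it is run from the repository root; this script is not part of the PR), the same invocation can be driven from Python:

```
# Sketch: reproduce the Travis coverage job locally (assumes pytest + pytest-cov).
import sys

import pytest

# Mirror the CI invocation: skip the slow integration test and collect coverage.
args = [
    "--ignore=tedana/tests/test_tedana.py",
    "--cov-report", "term-missing",
    "--cov=tedana",
    "tedana",
]

if __name__ == "__main__":
    sys.exit(pytest.main(args))
```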
64 changes: 9 additions & 55 deletions Dockerfile
@@ -31,44 +31,9 @@ RUN apt-get update -qq && apt-get install -yq --no-install-recommends \
&& chmod -R 777 /neurodocker && chmod a+s /neurodocker
ENTRYPOINT ["/neurodocker/startup.sh"]

RUN apt-get update -qq && apt-get install -yq --no-install-recommends git vim libxml2-dev libnlopt-dev libxslt-dev\
RUN apt-get update -qq && apt-get install -yq --no-install-recommends git vim \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

#--------------------
# Install AFNI latest
#--------------------
ENV PATH=/opt/afni:$PATH
RUN apt-get update -qq && apt-get install -yq --no-install-recommends ed gsl-bin libglu1-mesa-dev libglib2.0-0 libglw1-mesa \
libgomp1 libjpeg62 libxm4 netpbm tcsh xfonts-base xvfb \
&& libs_path=/usr/lib/x86_64-linux-gnu \
&& if [ -f $libs_path/libgsl.so.19 ]; then \
ln $libs_path/libgsl.so.19 $libs_path/libgsl.so.0; \
fi \
# Install libxp (not in all ubuntu/debian repositories) \
&& apt-get install -yq --no-install-recommends libxp6 \
|| /bin/bash -c " \
curl --retry 5 -o /tmp/libxp6.deb -sSL http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \
&& dpkg -i /tmp/libxp6.deb && rm -f /tmp/libxp6.deb" \
# Install libpng12 (not in all ubuntu/debian repositories) \
&& apt-get install -yq --no-install-recommends libpng12-0 \
|| /bin/bash -c " \
curl -o /tmp/libpng12.deb -sSL http://mirrors.kernel.org/debian/pool/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb \
&& dpkg -i /tmp/libpng12.deb && rm -f /tmp/libpng12.deb" \
# Install R \
&& apt-get install -yq --no-install-recommends \
r-base-dev r-cran-rmpi \
|| /bin/bash -c " \
curl -o /tmp/install_R.sh -sSL https://gist.githubusercontent.com/kaczmarj/8e3792ae1af70b03788163c44f453b43/raw/0577c62e4771236adf0191c826a25249eb69a130/R_installer_debian_ubuntu.sh \
&& /bin/bash /tmp/install_R.sh" \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
&& echo "Downloading AFNI ..." \
&& mkdir -p /opt/afni \
&& curl -sSL --retry 5 https://afni.nimh.nih.gov/pub/dist/tgz/linux_openmp_64.tgz \
| tar zx -C /opt/afni --strip-components=1 \
&& /opt/afni/rPkgsInstall -pkgs ALL \
&& rm -rf /tmp/*
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Create new user: neuro
RUN useradd --no-user-group --create-home --shell /bin/bash neuro
@@ -93,25 +58,14 @@ RUN echo "Downloading Miniconda installer ..." \
#-----------------------------
# Create py3 conda environment
#-----------------------------
RUN conda create -y -q --name default --channel vida-nyu python=3.6.1 \
numpy pandas reprozip traits \
&& sync && conda clean -tipsy && sync \
&& /bin/bash -c "source activate default \
&& pip install -q --no-cache-dir \
nipype ipython scikit-learn scipy ipdb mdp nilearn nibabel>=2.1.0" \
&& sync
ENV PATH=/opt/conda/envs/default/bin:$PATH

#------------------------------
# Create py27 conda environment
#------------------------------
RUN conda create -y -q --name py27 python=2.7 \
numpy pandas reprozip traits \
RUN conda create -y -q --name py36 python=3.6 \
numpy scikit-learn mdp nilearn scipy 'nibabel>=2.1.0' \
&& sync && conda clean -tipsy && sync \
&& /bin/bash -c "source activate default \
&& pip install -q --no-cache-dir \
nipype ipython scikit-learn scipy ipdb mdp nilearn nibabel>=2.1.0" \
&& sync
&& bash -c "source activate py36 \
&& pip install -q --no-cache-dir \
https://github.com/ME-ICA/tedana/archive/master.tar.gz" \
&& sync \
&& sed -i '$isource activate py36' $ND_ENTRYPOINT

USER root

40 changes: 22 additions & 18 deletions README.md
@@ -1,25 +1,28 @@
# tedana

TE-Dependent Analysis (_tedana_) is a Python module for denoising multi-echo fMRI data.
`TE`-`de`pendent `ana`lysis (_tedana_) is a Python module for denoising multi-echo functional magnetic resonance imaging (fMRI) data.

[![CircleCI](https://circleci.com/gh/ME-ICA/tedana.svg?style=shield)](https://circleci.com/gh/ME-ICA/tedana)
[![Documentation Status](https://readthedocs.org/projects/tedana/badge/?version=latest)](http://tedana.readthedocs.io/en/latest/?badge=latest)
[![Codecov](https://codecov.io/gh/me-ica/tedana/branch/master/graph/badge.svg)](https://codecov.io/gh/me-ica/tedana)
[![License](https://img.shields.io/badge/License-LGPL%202.0-blue.svg)](https://opensource.org/licenses/LGPL-2.1)
[![Join the chat at https://gitter.im/ME-ICA/tedana](https://badges.gitter.im/ME-ICA/tedana.svg)](https://gitter.im/ME-ICA/tedana?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)


## About

tedana is part of the ME-ICA pipeline, and therefore assumes that you're working with already preprocessed data. If you're in need of a preprocessing pipeline, we recommend [FMRIPREP](https://github.com/poldracklab/fmriprep/), which has been tested for compatibility with multi-echo fMRI data.
`tedana` originally came about as a part of the [`ME-ICA`](https://github.com/me-ica/me-ica) pipeline.
The ME-ICA pipeline originally performed both pre-processing and TE-dependent analysis of multi-echo fMRI data; however, `tedana` now assumes that you're working with data which have already been preprocessed.
If you're in need of a pre-processing pipeline, we recommend [`fmriprep`](https://github.com/poldracklab/fmriprep/), which has been tested for compatibility with multi-echo fMRI data and `tedana`.

### Why Multi-Echo?

Multi-echo fMRI data collection entails acquires multiple TEs (commonly called [echo times](http://mriquestions.com/tr-and-te.html)) for each collected fMRI volume.
Our signal of interest, Blood Oxygen-Level Dependent or [BOLD signal](http://www.fil.ion.ucl.ac.uk/spm/course/slides10-zurich/Kerstin_BOLD.pdf), is known to decay at a set rate within each fMRI volume.
Collecting multiple echos therefore allows us to infer if components of fMRI signal are BOLD-related or driven by acquisition artifacts, like participant motion.
For a review, see [Kundu et al. (2017), _NeuroImage_](https://paperpile.com/shared/eH3PPu).
Multi-echo fMRI data is obtained by acquiring multiple TEs (commonly called [echo times](http://mriquestions.com/tr-and-te.html)) for each MRI volume during data collection.
While fMRI signal contains important neural information (termed the blood oxygen-level dependent, or [BOLD signal](http://www.fil.ion.ucl.ac.uk/spm/course/slides10-zurich/Kerstin_BOLD.pdf)), it also contains "noise" (termed non-BOLD signal) caused by things like participant motion and changes in breathing.
Because the BOLD signal is known to decay at a set rate, collecting multiple echos allows us to assess whether components of the fMRI signal are BOLD or non-BOLD.
For a comprehensive review, see [Kundu et al. (2017), _NeuroImage_](https://paperpile.com/shared/eH3PPu).
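
As a rough illustration of why this works (a minimal sketch, not `tedana`'s actual implementation; the echo times and signal values below are made up), the monoexponential decay model `S(TE) = S0 * exp(-TE / T2*)` can be fit to a single voxel's multi-echo signal with a simple log-linear least-squares fit:

```
# Illustration only: log-linear fit of S(TE) = S0 * exp(-TE / T2*) for one voxel.
import numpy as np

tes = np.array([14.5, 38.5, 62.5])            # echo times in ms (example values)
signal = np.array([12000.0, 7500.0, 4900.0])  # measured signal at each echo

# log(S) = log(S0) - TE / T2*, so fit a straight line to log(signal) vs. TE.
slope, intercept = np.polyfit(tes, np.log(signal), 1)
t2star = -1.0 / slope
s0 = np.exp(intercept)
print("T2* ~ {:.1f} ms, S0 ~ {:.0f}".format(t2star, s0))
```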

In tedana, we combine all collected echos, then decompose the resulting time series into components that can be classified as BOLD or non-BOLD based. This is performed in a series of steps including:
In `tedana`, we take the time series from all the collected TEs, combine them, and decompose the resulting data into components that can be classified as BOLD or non-BOLD. This is performed in a series of steps including:

* Principal components analysis
* Independent components analysis
@@ -29,7 +32,9 @@ More information and documentation can be found at https://tedana.readthedocs.io

## Installation

You'll need to set up a working development environment to use tedana. We provide a Dockerfile for this purpose (check out [tips on using Docker](https://neurohackweek.github.io/docker-for-scientists/)), or you can set up your environment locally. If you choose the latter, make sure the following packages are installed:
You'll need to set up a working development environment to use `tedana`.
We provide a Dockerfile for this purpose (check out [tips on using Docker](https://neurohackweek.github.io/docker-for-scientists/)), but you can also set up your environment locally.
If you choose the latter, the following packages will need to be installed as dependencies:

mdp
nilearn
@@ -38,21 +43,20 @@ numpy
pybids>=0.4.0
scikit-learn

tedana will eventually be hosted on PyPi. In the mean time, you can still install it with `pip` using:
`tedana` will eventually be hosted on PyPI. In the interim, you can still install it with `pip` using:

```
pip install https://github.com/ME-ICA/tedana/archive/master.tar.gz
```

## Development
## Getting involved

We :yellow_heart: new contributors ! To get started, check out [our contributing guidelines](https://github.com/ME-ICA/tedana/blob/master/CONTRIBUTING.md).
We :yellow_heart: new contributors !
To get started, check out [our contributing guidelines](https://github.com/ME-ICA/tedana/blob/master/CONTRIBUTING.md).

Want to learn more about our plans for developing tedana ? Check out [our roadmap](https://github.com/ME-ICA/tedana/projects). Have a question, comment, or suggestion ? Open or comment on one of [our issues](https://github.com/ME-ICA/tedana/issues) !
Want to learn more about our plans for developing `tedana` ?
Check out [our roadmap](https://github.com/ME-ICA/tedana/projects).
Have a question, comment, or suggestion ?
Open or comment on one of [our issues](https://github.com/ME-ICA/tedana/issues) !

We ask that all contributions to tedana respect our [code of conduct](https://github.com/ME-ICA/tedana/blob/master/Code_of_Conduct.md).

### :earth_americas: Mozilla Global Sprint (10-11 May, 2018) :earth_africa:
Reviewer comment (Member): #mozsprint is over ! so sad 😢


This year, tedana will be participating in the [Mozilla Global Sprint](https://foundation.mozilla.org/opportunity/global-sprint/) !
Check out issues tagged [![mozsprint](https://img.shields.io/badge/-mozsprint-0052cc.svg)](https://github.com/ME-ICA/tedana/labels/mozsprint) for good places to get started during the sprint.
We ask that all contributions to `tedana` respect our [code of conduct](https://github.com/ME-ICA/tedana/blob/master/Code_of_Conduct.md).
9 changes: 4 additions & 5 deletions circle.yml
@@ -14,28 +14,27 @@ jobs:
- checkout

- run:
name: download data
name: Download input test data
command: curl -L -o /home/neuro/data/zcat_ffd.nii.gz https://www.dropbox.com/s/ljeqskdmnc6si9d/zcat_ffd.nii.gz?dl=0

- run:
name: download and unzip processed data
name: Download expected output data
command: |
curl -L -o /home/neuro/data/test_TED.tar.gz https://www.dropbox.com/s/x5xhzs3x6p3ukjl/test_TED.tar.gz?dl=0
tar -xvzf /home/neuro/data/test_TED.tar.gz --no-same-owner -C /home/neuro/data/


- run:
name: install dependencies
name: Create test environment
command: |
conda create --name venv python=3.6
source activate venv
pip install pytest
pip install -r requirements.txt
python setup.py install

# run tests!
- run:
name: run tests
name: Run tests
command: |
source activate venv
py.test ./tedana/tests/test_tedana.py
3 changes: 1 addition & 2 deletions requirements.txt
@@ -1,10 +1,9 @@
numpy
scikit-learn
scipy
mdp
nilearn
nibabel>=2.1.0
pybids>=0.4.0
nipype
duecredit
sphinx-argparse
numpydoc
4 changes: 2 additions & 2 deletions setup.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
""" meica setup script """
""" tedana setup script """


def main():
@@ -16,7 +16,7 @@ def main():
# For Python 3: use a locals dictionary
# http://stackoverflow.com/a/1463370/6820620
ldict = locals()
# Get version and release info, which is all stored in meica/info.py
# Get version and release info, which is all stored in tedana/info.py
module_file = op.join(this_path, 'tedana', 'info.py')
with open(module_file) as infofile:
pythoncode = [line for line in infofile.readlines() if not
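
As the comments above note, version and release info is read by exec-ing the package's `info.py` into a locals dictionary. A standalone sketch of that pattern (illustration only; the `mypackage/info.py` path and `__version__` attribute are assumptions, not the repository's exact code):

```
# Sketch of the "exec into a locals dict" pattern used to read package metadata.
import os.path as op

this_path = op.dirname(op.abspath(__file__))
ldict = locals()

# info.py is expected to contain simple assignments such as __version__ = '0.0.1'.
module_file = op.join(this_path, 'mypackage', 'info.py')
with open(module_file) as infofile:
    pythoncode = [line for line in infofile.readlines()
                  if not line.strip().startswith('#')]
    exec('\n'.join(pythoncode), globals(), ldict)

print(ldict['__version__'])
```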
33 changes: 30 additions & 3 deletions tedana/cli/run_tedana.py
@@ -7,6 +7,9 @@

from tedana import workflows

import logging
logging.basicConfig(format='[%(levelname)s]: ++ %(message)s', level=logging.INFO)


def is_valid_file(parser, arg):
"""
@@ -138,7 +141,7 @@ def get_parser():
dest='filecsdata',
help='Save component selection data',
action='store_true',
default=True)
default=False)
parser.add_argument('--label',
dest='label',
type=str,
@@ -149,13 +152,37 @@
type=int,
help='Seeded value for ICA, for reproducibility.',
default=42)
parser.add_argument('--debug',
dest='debug',
help=argparse.SUPPRESS,
action='store_true',
default=False)
parser.add_argument('--quiet',
dest='quiet',
help=argparse.SUPPRESS,
action='store_true',
default=False)
return parser


def main(argv=None):
"""Entry point"""
"""Tedana entry point"""
options = get_parser().parse_args(argv)
if options.debug and not options.quiet:
logging.getLogger().setLevel(logging.DEBUG)
elif options.quiet:
logging.getLogger().setLevel(logging.WARNING)
workflows.tedana.main(**vars(options))


def run_t2smap(argv=None):
"""T2smap entry point"""
options = get_parser().parse_args(argv)
workflows.tedana(**vars(options))
if options.debug and not options.quiet:
logging.getLogger().setLevel(logging.DEBUG)
elif options.quiet:
logging.getLogger().setLevel(logging.WARNING)
workflows.t2smap.main(**vars(options))


if __name__ == '__main__':
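
The new `--debug` and `--quiet` flags are hidden from `--help` output via `argparse.SUPPRESS` and only adjust the root logger's level. A minimal standalone sketch of the same pattern (not the full tedana CLI):

```
# Sketch: map hidden --debug/--quiet flags onto Python logging levels.
import argparse
import logging

logging.basicConfig(format='[%(levelname)s]: ++ %(message)s', level=logging.INFO)


def main(argv=None):
    parser = argparse.ArgumentParser()
    # help=argparse.SUPPRESS keeps these developer-only flags out of --help.
    parser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--quiet', action='store_true', help=argparse.SUPPRESS)
    options = parser.parse_args(argv)

    if options.debug and not options.quiet:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.quiet:
        logging.getLogger().setLevel(logging.WARNING)

    logging.debug('visible only with --debug')
    logging.info('suppressed by --quiet')


if __name__ == '__main__':
    main()
```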
10 changes: 3 additions & 7 deletions tedana/decomposition/_utils.py
@@ -6,12 +6,8 @@
import numpy as np
from scipy import stats

logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO)
LGR = logging.getLogger(__name__)

F_MAX = 500
Z_MAX = 8


def eimask(dd, ees=None):
"""
@@ -34,12 +30,12 @@ def eimask(dd, ees=None):
ees = range(dd.shape[1])
imask = np.zeros([dd.shape[0], len(ees)], dtype=bool)
for ee in ees:
LGR.info('++ Creating eimask for echo {}'.format(ee))
LGR.debug('Creating eimask for echo {}'.format(ee))
perc98 = stats.scoreatpercentile(dd[:, ee, :].flatten(), 98,
interpolation_method='lower')
lthr, hthr = 0.001 * perc98, 5 * perc98
LGR.info('++ Eimask threshold boundaries: '
'{:.03f} {:.03f}'.format(lthr, hthr))
LGR.debug('Eimask threshold boundaries: '
'{:.03f} {:.03f}'.format(lthr, hthr))
m = dd[:, ee, :].mean(axis=1)
imask[np.logical_and(m > lthr, m < hthr), ee] = True

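
The `eimask` bounds are derived from the 98th percentile of each echo's data (lower bound 0.001x and upper bound 5x that percentile), and voxels whose mean signal falls between the bounds are kept. A rough standalone sketch of that thresholding step on synthetic data (illustration only; the array shape and random values are made up):

```
# Sketch: the percentile-based bounds used by eimask, applied to synthetic data.
import numpy as np
from scipy import stats

rng = np.random.default_rng(42)
dd = rng.gamma(shape=2.0, scale=500.0, size=(1000, 3, 50))  # voxels x echoes x time

imask = np.zeros((dd.shape[0], dd.shape[1]), dtype=bool)
for ee in range(dd.shape[1]):
    perc98 = stats.scoreatpercentile(dd[:, ee, :].flatten(), 98,
                                     interpolation_method='lower')
    lthr, hthr = 0.001 * perc98, 5 * perc98  # keep voxels with plausible mean signal
    m = dd[:, ee, :].mean(axis=1)
    imask[np.logical_and(m > lthr, m < hthr), ee] = True

print(imask.sum(axis=0))  # number of voxels retained per echo
```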